[Binary artifact, not readable text: a POSIX tar (ustar) archive of Zuul CI job output. Recoverable structure from the tar headers: directories var/home/core/zuul-output/ and var/home/core/zuul-output/logs/ (mode 0755, owner core:core), containing the gzip-compressed file var/home/core/zuul-output/logs/kubelet.log.gz (mode 0644, owner core:core). The remainder of the section is the compressed byte stream of that log and is not recoverable as text.]
T%15L'{_ME n*4!8$a&n%5uBô!y &jt x,s6m!==KӶebV=̓h#n c4za<;Sn۝^pԎƍˢbH [po-EJΆ!vC˄Qv8\^e? {=v vk`LwpjpYYÎO+9FQr5(%T>nW7G[da& ͤ,R|t70+ em ߡa*d5cȹ[;j,6Ga8|?w"9-7HӫVYͿ(ɺ}3kJŕkktbGV A9]h?5W5Zh-J~=6x?~߾uxaw||u%]}5:87?Fv[h'm'?} U_}ٔ<=V2'Ry^Ɨ^N'¨>(||| =Ygpiv>8y{ymi&0:XχЅ+Sǟ:8Ks=iٓǽN O@?˗/W=̭/fPt+RJH,/>?=B]aER})(tz' £=@J5 3=PJGok}sЬ767)8MU *4-JUnF:6~3zLnT*[׮\3ָ pK]/j ƒi\OOhh,FuUG٣˞{^=FeGEѳ?nNQ'惮\2B0򳏎GG/?w!al'=އ;PEM0ωٜBe/M(t?镖ĿJlߦ+v~$A?,'׵a:OF-#x@9.32I~ ?,\.ʋ ZZG0OIdRg>1I^,濄r;ٯ4׷Vvc+(9! &Uᮃh}N@)B]g"ȣ2ʣUxP$ I~` Qޭ:ªWۖ%Wȳey:u/w3Q9sjĚ ExVv~0u!)EOnmV摆oƓaoTJ'0@J?4,\AYԘ]g[`cv aV[/$'ŚJIy&"8-#c*gJ\{oskOA*FS>SFOw.e&[KZƮPQzq mդӅ[TB(ENDd{'(>zI!a,u+¥1&`Qc(,<vk䛮8沙TsB@r8ZaKM.l"#C$-qH{IdNٖYqV )RIN3c."i%\P͐JJtB _CrI`9\+qNf")TD2'Tfi%1@!AyW;m@(@ܓ amb%{d\,Pl`Ad T)J8Ű=]&=?ѴA2KGe*Fc[d U!x,'IbK1H'f .FMw-Az筈AgoG FCPJ&Zm$on\ tfŶ=TWp|ОSND&6jC@,LebjΉT|tC<Td?Wuo/OƧՃ `?0SrmZ۹K2ILwo{ \9G:8ؕ#aH~`WtGWcv;5cs{<*AGNnԺUWK:JnaJ#i`sRדQ1Kl[8~z TN^v镯3:뗯޼N>?o>l/cc1d5KЍ^::QMߛt;K (?kq=+5i՛9h2y 5;˜KFHF^k}7Y[{/DWw^nN+ywߝnOUw03pZ׬BTې%xqzڥNV-~ \pAvn˽w_j{_ȉvVMۅƺӖwjVruÆCg[3*ik]{IkIZ9 $`r[+=uU pּxSxB :FRFv-3أ_PdJչ>`d6Ld˗;2E̍b `޾JPl2ZkQtߴ%x+׎Cl=iGfZ3Ə\fإK(j:qFMN. AxXdȉR4gHsKro+L emf SV*ɬYG a#*]{K}41R0 qzP1G5@"6 TR$|;X+\$JCƦs6֙e.ډWQw>=d~%Cp7(yѧ`yN>l{L޽`SNK|n=c9>F#/lHh`{y~U w6НBwR gj۸_aSUե.u/r!L4IQ\)R␔R<);f0i] &ECrlrQ,B!y 1D&׆bL, O*JQf#EOHe*Qϸ"8!$ B * Sb3gq5^,i}leSږC6 ́fU3Z&1_}$*bCpR*G6jIUb*I 0'2dGfQx1;EA' O'e*(#Q* yJY<Y\JVvYtY¬T+Y+sګէ})fe2+ϙ%LO`e,P p\:cK`BɨR2ԩ`\H4')9@±H Gb0ms8_nω{]KOys )U2)NRQ& hp)+K4{<:+fOXȝ9c#rGєYFJTD 3Nb pgf.$A=[/●] 3awhCĤIͼ ?:OT|'kUQǡhC喳Ԭ=Zk}w00SWGuaJw.&ájԏ]vwMaxƮ[ꨓ07ւMf ˔)A$V(|LeIӖ_iqT"&ZYB۰.U;*{^yj'hNZ{>k_B3D.=@:$!}-D !REI"'pD@ Q[ƈEI6I Ǻ\[/, !lNOPE#DF /$gB#S-)beh9Sreo!5UŠX9H%#~.尰s#ݓHbZGF 4ykJFsBE\(Ȑ]>Mʩw7=p I8%3xC-D^P/u[*PHV'S0rgW3UxE8;Z3l>&~/-{p^󁫭X5Nr4h.NBn5B:k3nwz8ub6isog!76kLd7Wk`&'( )sXլ9-.lJͭYW%+,Y5uj75ﭚ oCK%7p72懃Qb薊wCs.hl_9QtK-˖[Hk+\KU]/bВ/,=aq>T͢3UFz!RVg:+"t*R%VI`^\Uɧ.IO!(d .CtyY ("NDH<a U[k4O G[&c C᫫gg ]1jmm93|#ʒ}8_/̆? P;,XM]eyJ 80m0*REeJP9l;MN,{H%-upIxbC*du2 Bb^eOXŴK%vZ>]6COg]-hMǨE?H#IDmǢuI&ͼ6EіsFBLh$Se[.xN8P jxy=32lr^o m;blb‚*H$$ZJ퇃db jEm]ux.)#^*$[0";DVA14d%lbDU)OŨ ` +@.rEJڋbPmgui\XޏPq|Vd2D}&{\aj~}&ݳ0E%Sd-=ֳK˙7/'@P  zp5$u]u vQ]62yU`)U: g Ʉ*j͘ ;TH-bl<q"/=YBlZZoSn!,]ܲl[SigF%FUTU4lĹKЙ;;+#1ۑws#Sr۠ݬّ(O hY HRI4D1Α2c9Rnއ\OJDH\?ϥ)08mGk|%@[}mѧP?oxxTDSQa9R9%4 qÂK@K J~!fw#S G73@F+e JL@e yʹ7=PАYj֜ITNΔt .Q8KNPXa`(&v`;Xi{ܥ&ru-2v`!%śl|s&x N*jQp!l!0!XOrBJ4qh;MMg4g4y'z7MF.@[2Ë^ΓmOu6<=Ɠt8n*TxyjuB#E!!4y8NQv*Z y=H%,;z Y=>?2D䛜MN!QPчF%Ꮩ${|m_"K޻A܅~ R[ylO־Cc~rw襮X}=cV_dwx/Zje%R=mٵ-ÈkC1m#C9Q;}i8kWۦZ]Of&vCu\j^g;'}-hpl,D(گqE \>TipSHڛŨ}6~5;:#$wٟ{C1''?!%O(֩syo^u%Ȉ9Kޠz-$?$Ez99߼vr6"|طl}@H0^bϻ61f{޿;~7忮wݓσkyc4\j_!'Oϯ^aҠO5|z} q9}I^~8E~kF]n5-e&/W8_rqks_~zF Ynzn8?{۶"_0,i)n[,1O[,-3$EI(pIlr8su^sw8Bϑk>^k ]}:OPϟg5̍榨F0>K gfєjU@=TK;t*O!9SY9ӷf !S_m+e)Z]xG Q2=9̎kQV%vsu|~ {{3I7֤j_,v(6X}z:]+o&;&ʛ85 PUj^UWef1qqLT͘}sjk]8цӻs?թ_2tvYXeBV(1Q:"B@Ox FGa,ScXr;|+g# &U_^0iK-Ds <\slC ^S G;iD'Pcz0]aRXGTxY W3[ qPJhA:ÀhP[ј{t׼Dh$)sͭa>]:S1f]BL#G1 WepWy`nَG3F1{fI zTB(EN<;)FE!K$ڪu+|%0!@0pZFe)A@ 䭗Go;Dklx^G[(sAȏc&d M$})jHsIz~h-ߺʍp!'* e$'zQsA5CR([6Ht??E* 4>A>bPuH#R08IE$s2JaQiƝVc ;"APvdb|=v? E8i`~ͺ0Cfr!bH&/M! 
Bt\%pN1-s[YխN ⠕)E^:*S'*C"c c=L3X2Ĉ'Dn#(x'wފtqO1LpF@!hx`4edҡNPnYrQY㮒Wc0 ,!Lп?tN`tO|Z5>mt/!)45s]3yʟ *P3{ D3$ENC 9P%"'7B J1u'f<׶w2> }W+aP& -,*,'Wjjr[ϝE"d44>},tW.1:ߪ .tvv:9SND`+u6*ML.t6 ܂LyGt >Uxr1¯ŷyl 1yM̵~[gc;$Ϸ.BM-C򖮛!Ͱf˻#Â?ewӁ]&+:~nuɦVRZ̆tt9,G.U};0fm s~X)C+*&~++ י\r o{}ݷ߿;Dכwo$8 '$Iu;>к˜ZϷzc"$gOq}`•g~IV^*vƙx^P%10w ȇt30hJxq_aZ}>Z&Gj|}FB{ #dȽ6%3b11 i1aކ)ˋ1z<ʆXLÚF8F4$h)!Vhd%-ZELwʡQE[&miVm_Nu:/-ѫ/lye?ȹ\.b[hc1F)۠LnL )&]V%![OIث]0uv F{31Hc\zJ=B[klwл$!?MDS?-X^rضD) V.Jw-Qp2YX|_o?ߛ?DNYjOuTl8SEo&FJ2ذ@yڍcB)7jEm;rY5;X+\$JKƶiZ#g>5vie+G'݁\˅`g}3gN8FtPک2u{g(+׋ř"O)Lkna1"Ţ([by_y 4ZU{w;"A:x/Ē:[yPT(!%%Vdom{M$>eE2he9ݖ ebR Z.t0鿚mnY;nqʋBZW 죻b:?mXKu>Esɉ6$4N QyFOoFOhG9O}C! D QG FQ#Xԁ@w^xkr x )n$ݪ41 hDY; ZDl h5qoMF!(xAcˆBkl_|o}օ܋{O-A.k3`Ջ?t_>cT`m^鬙Ü1,QK%<1FhcgmI 9Zr;Afx~L--G#M4&c$5N:JEhg*¤.}9ۂ*"gxvP9  S[r%dM A,K%Dʺ8e)JD]VrO ɷZL4,!]sRY}/sR0RK-x c !$$*PEF Hɹ- UۼlJ{BnV)n6-/m/{Yb,%mgȾHa@`\5$~zGX_<\* V <`)K%]g(yt=zD (PM31"0wP h)NŸQ( uaЭA8-(<]ٷuԅ&@W&#,*y S L3DyK3Ucse<ʙ8<DX;yLrt?"匷}q9#Snw3*]d&Lh,dABA\eos-'? ~{l4E c!u\PƑo +>#6 =t"¦;NTrֱȦ%;NF^=a?LQaO[Ƣc\J\ڪc޵6q#ۿ2Ov#PVxw]ͺbݛr)iM IEV\1R%JFʱL>*}N/^Be\H)}5RLZMNK@`K5SjVݟMdTO;u,!][nu '1bePB^KZgLjJѢ:MSaLnFq>uZRnWc;*2GmRkQDfD4I\KDb#$*ۗLjFCkR J=qսf:\%))/p=•bnu4Yyn GI-f#jy~nJz>) ^8S iq5'14٫i0{ \ Ukl*xR%AkV `TP?.u=ٌ A4HR `d+q͉6.l 4j#XtF8mCjGf:GI+9kCCRXE6FB8" U'ڬDnb^jb ,:(3ADO$CJJjk*lV[8`vŽ[q ;na-|vŽ[qhTqvŽ[q ;V$B7Y+ d,tnMB7Y& d,tnM>`lNK>p.jYIZR&%1B.,kw=EŽ[&|dZl4w=dxfX58#i, p{o'oFI،BO1,nzq ]V `o`N :eS<:ks;ic8$*8ʝ71qV{4ĪZҌPG Xp).}Ԗx|ZD6` j0x\&HFl,WɆ4c],P,*.%ϸc~rsm=Fݍ?)P^;8bo^H50 1pʑZM#m b-iDZY*4C 'ZK(IItY<^Fґ0хUzc&݈m~.Φ뢶̌ڲv`oe 8jJ,\qiaB&B4S˩VwD.q>3 ؜8Fc}9!408waO0 "fӏuQeFDU > ,*4iQ7:k#5KSrFE_@xa"V!>uS/ gN DP#1s)-3"fnDrap 4Q,?)ےJe? ]N;tl7\oYÞdHĞr%ު\ŴڀZ%/i ,9K_ < E%*G+Ւ+Y0XUa(' $zVn*[+w3$BԐl|4)FN);O/2L!9hD! thq`@ 2QI{f;i2yph5Z`(3*h2:t6*P`) NPFʽM-F? q]~=Z*l7;(ESE48điv9\U M/ KpηAYBHSf#!0o$с-! !r-|P4ۈAin=ޱ m8OAF '/_q7N^c+#QZ9!QhʅLR!WYPpe&m;tU ЄyDD SR |`Fym@2`"S*UVB& w<xhsuR ;ݧuKWj?_x< 5@#=i#41KuC-fGp p| tBOA<pt6uLŷK$,7Ba"'Ġ,>2%(ȓ3 ugk^{s›%+et\p JbEkNb٬rb}.6qH]BN$^^LRO ўa(qihs.$ 1Qia0++xYY$5 \ QiCύF芅6# F >#MOxMO{O h`\ы*'U:2Ya4'a8ڏƍËE"Ӆ5bHjb/g#߫`f@7"wU.˜נg==>w1'p 9M48> LҮ7G)[d& wՄYM̩M҅#6ХCm۲oru|CuݭXh%0[;o>˿yJ"[^C^~hsdB^olկIcӥZ H,UN=z/W.$[>LFvtRjޤ̕yǽL㖣:cr1뎏{|l*tZcp;36ߜi[q8Rד']ʇhzリוֹ9{*0bB7Cui5-z1bNy.t oI4sp_V'dQ6ډI{_@ls l$]>S7IQMM_k-sb]d.kP[r%I=k;rO?^xy?~ov .<:81{OuZ.^xׯޛ쟟^;{t*OK{Y{0̵d῀דzs2_{&}cEʁ>|>|Ǐ"[=}3>ŵQ~=BK_6tRo^rt!D{-k/^8y/} sm7/3h^-҄:MT ղ!_al0wi*u^7fMmZf8Yh;4#'ϥC= &uydFW]I)F^xG Q*)8vi6s1WKl og`ۣ{ԽaF=3:ڲ[$jg)MPtt;lSui-eZ)-JGV;xXf/2~9mߘn4Q5!;K=sqˇW;yg怓9&ˤё*ūF)}"HΈh(6: c+ EX&糬v,T=dL¾#<\sl!`xp\#̝4Y" /xX¸w1n5?).JDeqc*h%{k :d2A sKe7V5''4W۷/gG߷&=Yhpsשq]pb@*ڦ}Ji2%UNI^=Å5A)٦"WaF"4EW8/6X ./}S60l$.f۟7i]iI8e6W* Rf^gϿxf&᳗*TF@Lp& m.:e'\pF5F7sRBn tYvJfM`9jiGMVt4@/ogCjTE Nh3"gu{hcu?WaOaպ+]O"~ɇ?ez-#cfJZ{Otv*Fco3;N^w jQpn=J"GB[hUlcj% \U"'"ShXRQ KBh(Q0! pZwmmLeEUye'q݇ W+d8eM")JI=.%f0@h45 N |Tvo8z|y_#gCT)o]fN Ht][+|Q`yS9&J[Šܬ!&TciYќ;rR`q ^rb)vI+ jPRX.mȹ$Ã|yENnf0*#R"%1 :s2JaQiƝVc +"XAPVdڭ ddPi S/M_ Q!N7JʅHX䁚4sP*san-? )ٚ6U1VWyLIک` a*D3#R`#IۅB{G;ז GVĠU{a37(A$#!(c%-w&o`|,xָdO]gT`b 饻}`@ >WJ b]e NE{2f4dK*=|e}̮$)c `uS1ElSAuBOJ`pUtb 0_RJMє\ uGUDqؿ|f`.Kp MwPNOJ)?ȫ"xJXq&FI@a]a8#&?f }KgUt#?7/^Ob@>)1gs{jl휑('!?>n/bjZ[:[ Y 63,!!/| {.fmvϺ5csszUַ:VW5%˞ i._d9σKU!,=٠WКJćnz Mˣ}7GWG0`S$ߛ?Gך͛椩ITMMMڕMvyEח'tny[. J?f~K(ƛՕ}H7 O>*^[dLIDz&ܙ/E"EL1.Z] bhQ/B[>>;?$Ya"$"{&B R"f"FpbZhCkmxtkD{-[mCJLG#LTMR)!Vhd%JZ&RKn'Ǎiq}vӲQ^iyr^պ[>%#:>ld9W׵9?@X̵Q*u6([8SiN1#5S7@ GLD7nvDڑ@Hӎ|a Re8gITtSJmΌ#'X1#V3fګv&tk奌4.iYn_ʾY j$R\f:_'e:tv/e YP.#Pg:Y5 r)۫fUS_͇ip_aI]'.gݺ-m;^W^LJ.լF N[S3eUv5.(\7Vz"MKbhZ! 
Ƙcx45a^r6$Qs-9˝R 3U в<حU ^+߁U1=jVڪTOR) ii_U*D(\!r -%& 4*= ޝTY 4Ь0}oFSP/v`N+[c9d5n҅{Z>Jk S}PEVvC]AmeLP?Heb.Iq֡M)ׂ~>7^6OFzD m39 sd)ld@N[n]޺yn6Y?3+(yDQ6\hW eL{R".U1 Qs uې'=+EO+y5E'H#o=HXn!.#ENRAY}dJ:4mXr3'_FjE5ĒcWԁ -o"w/Xq,sHM@QSM} .ZDpBUS~b]T-VM& DԭOv]{3P0-lH_jB\^ȋ; o\ -CALSDD $:l$C0$Yp$X/Y+o/NjJ!<7AvW,0}Ido:"8/OS7NSOI:*gq48NU\/92ǃ*m{Ѹ`tY:9`/]);zMtp<(ݒf75\a(1\[cz;z0/=i]WtNMĽhXJaoxz\>Gx\ۂ,BO^Փkɉ:<~~”Vuٌ LoErnֶ0 QmΛsw.L t( .&_H/DBPrYő\1"QOW٪zWօd_joppخ,{32uOn,kcz]Gz,*'yzܥ논sfl9-R߷fp6%w*?j!x?XEݫ/_`Daݥ٤l{1ΒĬyT/O$(/2s[g>OӴp9{ߖ!3X#Yo`|fM]墸tLyL`ݿ:ŵ93#8@.썂?1*:rnIr%I~'h~T7?E g 7M_dzojb ,`zL%qb3<]?wf]"p\ύd Sl8|MZd/cZ>zv|~R}]O/gs]^Z ^(8=r2aFtߧUb!Xvghxg?Nk<{=rޗc*[묆< *`D}էR/)Xg/pф+߽* Lk%~B'qZS*ȏs-p%5{^ZLNZ[&{'r1腉 h(#W2:?)np09sH[G‰A%1lCYjhXw-hV=mv\b`k.A(!gh՞k:FY^1Tk%i1s}b.$aѭԆ7H.Nػ6rdW.x 0ٝ6;ؗ EƆeٱI[l],nY)KN@Gf5XXh0_mgs`4ɭ=zgltiqp^dO}_͖fwrvIYpΈ<1uG ^:؟J}gw_7{|OUv#|88~&OU`\/5]d) xpwvaC-C;Z[Hmd!+=w, A9MNm CѦ)&%VYXf.%AdCp q3>w$^ZM Oךҟ_!~"'Z~Q6A+O%mf iH*dn[8\ .&/a$WLh!S( 9V^y|jKţ!}ys(y瑏&W7-_ædV@NNw8u^JmxK)U kʑaZ82GPƀ`.dT{+j |LK$7.D ~Ssv'u}k\$ZϠ1۴9:r;u`v$fz! pR2n2T )ґ,+L6ΕPQwژE vۨj<ںk)juu;UC5(Z4= g_IizڕR]p'{?{\L?4oJLad@}"U{=EnjƤtL nޮ IPZ_jOՖJRh{Mڒcϫ9>B_&V"MMπ=4,[H2s 7텓b|ǣS饥v6M~KmuYmlZ߈k6Zhtл揌ny ؛|W7-n~/Lt&DlмbncC".dQ=:{p9traP j&]ss ˆW6ZA[8i(׭M^8$ix.r<;5.2yMZڵ)٠A.RtbrK_պgE:Q"D9 Fpf5>Ĕɰ\ BYd2ä(N%sch5Z }D3`xKcD?|O8M#YBĂ#n(qt&8]Kvh)G~&Haql!.мf4AwۅDŽO3=T8>K7KcqZ+AF*! #NrѧhݚG>֮k'N '[y7X٧ӏo_7~^x Zs?tnۥ `t1zҖ![L0~ƺFrs$Pt58t,oib]fb/FÛDo<=?9XzwGl^ 픎k2"U,Eq̏bz:ѿ_ؚj҉ߦ;MR͟߾¾78? LNԓEpO}COSk M͇6Zռلo2y_>JŸ3{[n|7O#89Ch{P]owۜĦe~>=:T+aրs!w f#=±~WLMnUb&6G~NB#E&7I9TJJsA N9gȃ穏6<{anv|9ĈÁ gt>{1j'wBkrzB+%f?ԩr3N˃ [rsY]o{s\ϴs%\է['R1-o5kgN-DגWKץ9!%KV50kvOrFЂ->Lr :Uȍ6qUMРd$}%SXUsfUƪbFa.OEʠȘ!vRN%BQ6:誂ם1Y ʕr]PR`=3!Klo{|+07n"Yzg79 ;ga+vD;YJ,Ȑ`4\9/e'd" 1DG.ރW.$6 |!em ' tiA$oe⅒)2 Ĺ8^r?VH촰p/7Dkw6RzɗsY d930zrJXV:$+ņkϬKTYaQBVqkVɛ(E4Yq9'*+Ct:W|̵(E ct9cE7G݌뾹WM(kR&ZF_LdIdWRV‚v@kaYy)E0$ϸH)'M>d霏S$j.YJPgM(Y(u4q%:^]GzX~'CH2lg=MrBSo}cϧ<,3J%o0:v0sl-ۭ)ܚDLDBٷe1e[yelHac"!ɢiZ(%(X!T/Kdr!z3aj_SʃXsHU)#nAd$,EsZ,Id(C 7FFǎn 3{_{A'mA[*2 sB-xNL &.䱷mRJ>}-xzu?1 ."|N{LvRU꼯 g}}/(0p9g.*+-(J) 9̵~]UPu+%bM,82RrԤp' *raWh]vh $|LogJrA>=!ח]3ry7mrwUR>ݗO]v/瞍6,잡Yxӛ٘ZJ rhw*Mhmmԏ|w1 ;\=_{xnϠk%9Wv[Xaw|AVvn{NwtwaClh{X qϿgYZ}QW[rKz.N}k=ïp"::s;_ ٓ*%6>RbטS)%6it&%s))% a )YK)_9\=O\^7\=OZg^'\=OJ+\3աKOi\DUdU׫S WV >pդ`V~n>MHb skB{p)㞃믕58 k417ܱ?L5q<Ȼ~=_2%-RuxB 2YQsW3+zs;YHt6ސ{(\Lu`vX}3Jj8K r;&OrJxeSE4C˿cl}m򴅣 2 5s3YrBTk[>a_kO TSV;iO(\5_} ey* ҲcWMJ2pÕV̒,ri%u8uU*r])"hg= fTlIEZ1 AA5J:rPҗ,\k5.ѿU6۵W3#nޛF ҵI.*I woYdU~ѱZפ~Ok rd~~x)kȏϛGb/J|zfKyaHj#XqkdQh3^ocCl%tq#O_iܮ4k[aZ1@jx$^]MMz+F-MTW i]޽țC"ZEYj6_wh LY=9Fʼâ;Q@F842ҩmdƒJUFu,;TUJ zj֚j  ^8cN{P;kZ=eh4ݳw&䰬؜PuTJq&>{yI;2W_]#F] %WjR ·\v~9Ip[a}w!$ V[2Q!7Iֺ㻗[J4oar᪻ޮ3tVyk՟fS[]XϗNS s;^1|7nk}mGt["8CZopHjg}V{2$S\ԲyEؒUhB>fyD:B(S}%n<p7}nǟWVͷ),Rq3CĮvF_\9ZȲPl* Ee)YS-ٵ;/:]OEގ aַ!|ےQZ7Oo\~[5PS5:ȭ>%ceuPFi2^ Mx6V-A1DM\>: )$DMi\1˔ fՉlC:v}&#&iHlWaANWeSڕ*2 Bxs0ok UDr.9aAC!WZ홄dmǔv[jYys 5 U-jLQ*cNgYMdT2io P'aZ r`KDUJKf`jЍI֚#&t}M1a]v"h1k>ߝZrϔ);Q!n6Luʵ`Bi`M2: bX*D2="8SBë Vv58lMX~)f̂V@^  Oc <`!LFa5Yr>#XQdߎ%sIPfԼ*%Vme)!9QJRhITYTk䪧μ@}ZoInjɓnx"NP\I|i_"ZJx XDփZSXacBTCJqZ԰1L)Sl.xGY "KjԼTJ.0%PKQ-/[*}" A!bF r U$J(`(xu Hy#MR|)KV^ F[)V"!,v( :*Y?e`e+~U*^(70:l`F*q3F\Tsi÷Pަ',qYG qAPVZgeg1. . 
8ЩmoV4ZdÜkmeh`+w%AJƬT: S"(zvc ['-&UP*aB1 ɴ &Є{+51EB^]ŋO'S8,k%Vi,z$%)x1* h)h0y .|1B֨5m2 hw0)i,a"m.0Z U B4a d!:xdhOX@{3*f4 `b-Ѫ %W6<K,' Ҋ29Zh5vE0QLDPm [;( FdI;<* cTE@`gQ*BkmȲ!, &-A MىaFPI}&)AY(^#ԹUu!kÉTIZI;t,ҝ,gAMQI 2K7o46@*gpKo!W04- 3 ]$D_ϛ O(2c0s6Zi̝gW@ٰ.aդF&A-6K`06I(np`3 =0ltF mwK(v`)jf!׺@$p58EÐ  J;@q(Qo 큾Neo;Á :% /a,4 9 9C typC-5VS(Be TP T"' J1 =-`5D}0&i` ~t7VW "\x>7\A\Hb6r 0t4֓{_gf~Qu= n-,ʼzFCcj0޴ PwsP/>PpU ON \ωƸ\BPJONCt))9 DN r@"'9 DN r@"'9 DN r@"'9 DN r@"'9 DN r@"'9 DN r@yE@  h޿R''!:Sܑ@"'9 DN r@"'9 DN r@"'9 DN r@"'9 DN r@"'9 DN r@"'9 tN + 3}r`գw\ݟwZ (%'!:H\IN r@"'9 DN r@"'9 DN r@"'9 DN r@"'9 DN r@"'9 DN r@"':@ ''8q8pvHOwtcKrCN r@"'9 DN r@"'9 DN r@"'9 DN r@"'9 DN r@"'9 DN r@"'О;NCz+^WG/Ǹsn/NYJ| ϮUި'7.m{bvp9{Zvh}v(/¸ɸtߪq]!`zCWךu~ QzOtut% Gt- ]!\BW}+@)$:D¹`e mp74Lwrǒ`6Gh5OiVcwhG3ev)YIPb'JȬ֪)ҋdm!+ [M2Nf,1_8ȧ>ُܻ<=d?zw>|rc? n]3`q`s|>ޔ+$_IZFNnhpLK~H, ܪ9<#Gm+:kGmkQS mӻ˭-W>ZVW0i=RhmiQ%`w 9/~}鏥JZcDM^wo:Xgǿ˳Zg׮t]EG-ٝz$5Br'c2Cp%S}jVD4$Q3{DW,-f Q*ЕL>+Lo z}+D)H]"])=+QWvAD !ҕ1_̭mmnkUy]][4fk{u]ńh9`kӘUɼ4Cir3Ī@cζVJ#JglAtpF"K;ż0=+87tpUoD%5{Q:Etutz͕;+ +XoanjBWݭ5;f큑i+'vl W'->3`;]ivw%Os-1=+, ]!\BWNWR+|=+l|o :ZB] ]IÜ=+Y ި+Dk?pJ]!`g{CWWKBD{8]!J`P潡+kt_ :t(=HW@;يc厕V_*6m&ߖYzD1FciDhinp2kqmsG<&b<I,;=Q˵C{ax3tqO:-m HGʌe_)3Pɨ,3vQ5TּyPֵS Za*ƗkU=kA8ȨrӵYNxo}r8d=D}'dwjvxv-^vő "o57RZojrS }hrJլT;{p媖uS.\5JQG_7_yD=Z~iXUV#j,jU'D C*sAJ(;+Di<՟3gzzV WOLWO{ǖh|۳`oAWU=o|(]!`ozCW B;]!JˈTnjk5qyj,T-aMTvawS)z7U9#jF!Z] t&׫0 Eo rBr'DWHW1U ;՟y{kz3ohw +7}B5/th?/#: ]?J@i6Uccj+dꪸӵnLgC?kl>GoY"OgMJ8׀n:;`|[TPWYvVCw7;Tgs]IntsjJ>Z7 KzYs^U|~r>KڠRMsRLױPG-b.勔 QylɊ^zInɯ~XR¬{!0M n ^~M%y ]'U2Qݫ z_/fWd'9W}k g?RJa=X&9S6xyM Y,ef뒥EniW8<|B>S0+u.O&~Y 4ms'bv0x#cJ}6eJ3m."D/Yz珮ǏZJH[z̡->uFl8h^vv7s[NkW֛;yҹ22t)ۧټ ۱Re;OgGH<Xݛ,vftյh]WA5vQKPU =iMϯVge`\oGxJl RzF & LK,^SP(t2y Og/F]:ti_2AZ $t:̀8?|୒'_QNs6ÄSY6l>AQ3?X:Y*% r~*=6Ǵ2&юʧ;k9X/dIuzÕ- ee$6 6XlZ2buQa8?퀵h76]) p>Z+=+~*p&R۴ChK J͌G+iY; αcFrᴌ9\#2ո|7r˂p/֪xa^kJ E'nP4ͲQ^_эuݸ)o?+VQh3^ hVo_P[k%u}%4J2y[ج#(2hOw A%V9 :(jhʓz_kI'&J#>hUgb.h XNenBH]A,v:$BJ5ZG z}iTcI#ιԈ-'L6bǓO;ɧ,OO~Otk̳=8gul +Oe`)O4t H穤i[?L+{ڽ(=}V0fv3*gd"J H'[8eSTS.Q9AeD@̀X[tR-h ń>lh5Q>1! 
kH"7âdoc ɨ %+N]19F5 ppGz Z7NqnUWǨXo|qůu4t8xzȓ-]K^q2jIǬ*}!buI$]&m# )R\gpY{tp{'ѿ?_vBګW7+ˏ:pƠn.&ü]RU_<C#7/]G;/e8WQ}޼7wiy&헨Kˋos8ɻEB[#ˋ> ΝVu5&Vt8`e5ob?I^ll:t:ONO.6w|5~i>u==Sno@o~iTXq+2:+V?ܔ&f*!3A*rRy)G+?>= : ;R:&v^+ (R .TQ Zc+H`Xtbd0ə3V\Uѥ?Z2A&cZ'hbUlv[sj:Vk'#~Fj jwΝ} 9/WͧO67?Y+rwC6Dk!{hSI?'Tcя;.w"Ͻ|~Oh}˖-<|{#y&~:n(~SI̟m< iX됒p _:RMLGH$$w$G$$j.jV59\"rtТA$$;l+)㡿be62S#"9>A]1B灘y:3Y[sZ]3YDN#iu|>RNTκXTgK]]E/sHL1?s5$w\tjzRd],`֥} hUA%5D*OQ JoH*+lGX| :ja&RNgΤzg 9FLokvz{r6O{b̪RBRԎ!AQPb= XD_ %*FcWdO} AR~DZot6Y2%RR`nG}Ng 5$C&$"ߜ#d CJYS%%`nʛYĨk0\buEcL |]ҏ>A5&gHy#-eŞeX1Xs&F#;煎~2(WTՋA3^G9?;>}"8&bB([h\k;G cpc-hڽa]F_1W3k]a ̜ҩd_65 +"%ҁ$A*)mp̪J X5Ԣ11?`U.|><5 ԧAoӓ.Lc>m?X[[|^9XpӮ5v+l!0ei^ulܡ V,Cv4D>.qβNh@<\[A{T={P[!%dUh)$tڅkrFYAM&aIIZuB}6>F]cM" ([U4±_ Dk8ndE1Vf5gG~bg|#9{J89$ϯsWߛV~q#;?|tTȺ jz ΐNO&GEv0Du-/(|[:`A6Kp5S`[Lr[FLՑ[ʱ,S􋛋4yw^/~<a8dp.Y ɴ6lVGa.訃 f $`6S-gC1"X P8%ո>xE- wXsfU8{tvV5 ei{Á}TI ѰRjJ\IJ=)RC2%ri+Q kQ UОqo;V62 ЏQ}Hċˆ׬..<54_dqr2=6.>W[( ʦ1B)EŒPU;= A bd4 "Է`n(Tck}`C5gNiE1k6:q`ƼchQ%|`8GN7Y Ȳ\I*l6(JCr(7lGucNR4$P[aV(>J>TWTR-2kvؓ#6>i`HG=F @U4m%X''#[D9 $6ς|Þ>H+%|Q*fDQCAkj3L={0iA,QlRZhp1˫NWY79(y_ 0/n<^I̸ -46_8x?1?\\؀/bJ7F?>S#"=X2bZd~OATtR*s^߽ Yv|Ur:?NB!2Z 4hji{tM\R t͏Ii@{農Iڬ1xvqufu*:s>O8K ɜbM" [I>bTe>+iX^1C)b^F!VFAʄb.v5IZFUAT~ ;޵Vc5 e/MXw4X,X@SLє@*䜋.xq<2g<ߛlؒ:8T>IVPk)Y]%;~dt|)4 g0&a2}&WMuFF)R~l o'hu) _SY6$AC6ukXv(̶ju򯧓pV[4鲡( .jYxB*ҷ Ac["!-(r4q=%S$i#5սm .G+|vMt\j^';]GEuŽ)QG{{ Ӗț¯t}9wܙIHyޚx|6}Y@T.!8T1G&K#E.ͪs؋qs5ogUKPe(/lVf`nx}g׏ ˋSzcdhх?`MW:8ysW=!WbI9)kӇW>W5<~/ MF ץv{ ;<8*`h49y_{ۗŧu*Ylz\oOחӊ0-'KL>J>gPPrٜn - LW8=vGDy#,UMf!kӟ@39NPߗ5Ʈz3.)y,zY_}?Ÿ__C}Z!z$\xʏ57~0 R"S ]y ֗5ޯ @ ~kof]5* ˂=D˟Ud{BL?rW~{cCnk6־Hs2iko&ʂ{&=UEiro 턷1!t&j,՚]޲;N;=DD(>c)īF)} Pltr/15Ve,DZߜou1gRs&mɣGwO9 Ƙcl2'ѯ<1Oج6;t0 P)Q.CjҚeeӳ"Rw]BUןTo~yD{ɱ6$Qs-9˝R 3^)3ns>l*tl) ޤ\QN)93ZH&zGYYgv*@5Dt* j+5j-u ^ں_v-" B|nb{pG9:XGHk\Ų}|x ?0&]'Aw=p؉;9gWw >iTTrÞQ"3S$ sy=VFo%喘~cnʭuB F{bd1*A\s %)N( sN F9ڰ6Ұ6'ǝ (ӃG`_o[m&0Ϻ}ֵ>ZuϺg]볮Y ={<?W-̠p4hASQ-5u!gh + J3%<&W#4G&H&H'&"`"ZVǝM DB^kF#tێ ݥNտ=&gF"X.W; *3gl"_AATRY ڰjK >"@54.F"TkJkXg슅e,= W.fp?嵞`0Ngrpj&S:$P)GEj5e{(hGdLp*bSE#aHƞQk % 6N0+R:"X[#g3bL̮vkܱ+j˖Q[kk#bfM5%r8NiaB&5S˩VvۍFYh6!f`E;$`)9'@q=r`TyiZٌQ_9%` ""jU="Ȅ<`ˁUx#6RT8(gTUD #"4zHL8cu4(Kn-#bklF.Nx]lKvE2.{\\K!-!qc娠*}sˣEbŮakܱ#=>ò; ŠYq  ~| GMڃqqϯ=/f1{ނ$(yhPrZ鶣L޻A`xeD;u#`*Q((Q18^9E\Q̂ʷ]Շh^zL)#y^?6ƽ'ƅ4y$KWÚA5?q4> ^4n:_0,R [a/.Hl o'ttzfC_ 50fpzlp%!|sMG^duZ4JaQKv|sEfpL 700,qaKKm/bd Ɛs[rX1lq^,y "[H1ͭbnϋHuSGz>f1RӄwLXkx/Ww귉.$=J 1]!)$Vh^qoXqUbM#oE\eN r B}ߞ3SY5A|J>Ds6gٿ?V*oQ0bBq0]/0,qA7z E- A ,'lγg'&]P}?̀F҃ç`d|f ]tMyM_ɵ91c0`DŚ,Lqns%ITrЇ^~X _,㏷?:|x4` >|.4?1>|%vl7>U/]|x \,t?_Stz$&^DӋϳ>z|CC]O/9.--l& {}l%ЩouQ`jnF>G9~qK)>۷o=̭]WsZt+RJH, />e<:;{TC]a廗ER}+(ljB+SHN*?΅]|^KžO( Z߉ҏc]py.`j\1R5E"TDrL.Οʦ؁v2($9!^,InǍue4SF۠2}^K-wZjyI)(kk >xR lo䍙EX+n u}=E霍? Z m* J.Q,ETT# Q20 ̶{PFg  $& I/w(j7-rgVLNJoÌfϪP *zzYy|3='%݇W(&(2h;]Ez`cv1:,f~<*?Om--tac^b]-GC%ydLLIkmn ̏4[;^\QOwr`ivgd3jmtP:2ʽ䁪hYIA4B, R*B"&8؄Z= D:RIX8FҎY | \KƭGoY1; p|6'$}HM57_<Ģd񮶐xRCö,h@̺\ luH3SZɉ{|E$$ BIi.] 
s7b|A^K\uHwD@4:&(fEwZI%PD0@AP(@2qaNw[z, ( CfAXA ;Da8(("y5xis ARqrao?O.RW#֊]5VWyLRCUKfRG!F O";5Ygm럪 .7=9ݛSND`TjCgg.LXϦ68Wҁ 9Uy:  ͛ŋqռb|̉A\[/v9 G_Ϧ!+@9Ciґ_>I0qVVX:<9 V,ah8zr9f?]196WG%h$Fmk?.u}6 (fmF!)C+:O޼N8o_>8Dz 8B3׆կI[CxTumκZuƕmNa [}7Q1ˋ?q)㢎.hB93 q?)\]}O KyҟdZzD_E>e-F8牠e/+EmV6n"H` !,2H",Jp#ۀc0xDAJL (8tΆ{w6,//]t8xzW ]S*F8F4))!Vhd%-ZELwʙNuuanZPdg8+NKԪ.a %¼zSjwAΕ-v9GBp6JNer+gI1D*}[;(hsmYg4;b `8zւլyp\Vm:Q;ĬWK@Hm ʈdE3"J8sxf_m1ܞEZȈno^!zk mI˧S3GU H/x Y 0̦.Is>^-~ \pAi˽ci8:6{VviOu-^:f5*mkN.(\ׅmi=%Ar~4a$7X0`ȏ[P9PRT,,Qp2ݩXwQ}9};9B$k픕J2=eQe/(ȧƽ>)`aRkr 89*Ȣ`;X+\$JCƶk0Χ\;޳qJ+'^[7} r!ة>$䭫>S`^7sh鴴3cn429;) P ٲղA`i_Q] Q>zԁHw^xkrJFB#I&ГZU=(sVA- 0C &#eU5p8,wp ,cDž\ N{:HקKA.Uܷ&xrm:nlQ)vJL>JBE[9Oǧ6"Ǵ;kHE>ג)E 0;e86xD[HwڝxzssBy6Nz a$w){Ki[PB*gRbr𿠹T.aYHd!a{pHF8e\5ˉD?{۶BJ~(ͺ6v[4]`oں%WҸEClV,NĤuk7g'N|v]=;t/#Ʊv瞛ϟ{Nd~=7Ϲ)UPgff(e?$Ӹ'2 Zuw'wi}!glqX|4 pŊ2ҹž7h)YDL@"2A v_&RΗ2̨r hb0^(GdMnR`.cy]76coی[b3SFSf0 2|Ɖ&j2]&9wp+8d_>:B/dp{L}ٯSj;;ZoO]5>.G&&`{7̉󈸙\&;"nv_#f*h׈7nDآR#"޿3frWQv\JA;\ `hIL *\1 [W6"pu8⊐g[W(XhprW~t\J:\ Dp)c r Pm4J*)W(Xhpr rv\Ja;\ 2&+jG& Z*ڎ+P -wMr'W,ӓ.] }rv纀 KHH䗷/dxCRF;4$b ـ>Lo?txp֚ZUݤ(WDcUZ9;Ti9CĕИ@,1\Nbܻ+TY*;\^p *XTLq*Eg]}9,z#2 W}/14S=JѲD=5*@0U3Ƃ+TZ+Ti:\"QpŌ&F+Pl=P&WX)b g8ܹ+\\bUDWv!JXjW²0Wv+t,BWJBEDB: ʵLZJLq*puR.Z3ZD% 8Hc*hNm V7k$ Q.`rvLJYiM4φˤ*>|Z_ |{Q;)+IK&COIjN/-sOKLI|t<̦gп|dbqg*hbdVI>JM352pBP zL%Ptq.I9W!Iaxq-_5EK}zLw~y7GhfSƼNE4㖥48\SS瓊 WOjL?o!_%s%-H9SY)zk I%+j z]>dыKYA4PQ2М%˝` ƄTƴ-3.>E,ClT#?}*u7zClKc$"\`IU%Ă+Tiq* pJ>eי ~w㪙\g\5S+te3e.Ke\W-zzu8fj5lRW+F,"\o"AK3LWRt:D\}RG+NnfrWVȶ U*qŘPj up@M'cSpdګ\o]|% ɻվ\;Jx< YxGoVIҍ]a_>$ tW⫒Т Z7Ǔȏ%a.!욠cdpFBGql 8Sdj2ABn]*piqQUXԣ쨪`V=Պo-݉N]O +=dT 9ɀ*O>9毒ՄH"i; _&Zzo엋sZc֮5]eC;{٢=lsVkCDd`&D4Uь,QiTrҙjhIj  P4S+Eq*U7vRjn# ,3\#c5ˌW+M`>}Ȁ=v&Eil*A$l^CTiô1qprWvo7f6SYۿppe9-pNH Q jv\J+;\}1R,z#K`#W$S3jO/4-[U p:\=)WDŽ+5ٻfrWV Uq-oHh36\1sOڎ+TmhD+X4Bjo֚ T9p,BiP bBRv:@\Ia g8ؾ+\\cԝuuRs# V<\\ m`T ڭ $4#r7UP-j-Vx5u^%LF-xEF.#T#Fj%վUY[l<ִρ^jj(<E㳪Q2۹kXM-`*rU43v_05RjaL-ڢtm< h֙A%B[g>D\Yei1F++l,Bg$P\ZIw\5,t5kH{U3 Wt=yV+W(WXpjl;@%o۶Wς+5%,"\`!uD*v\J;\ Dp)W(wߞm?PW.c:?G72ͧc5%|̦}~eظ9*R4X^Qh} R:A:y=_ha$eB~6| *4Sc]" 0M$?A!B@gE LCfd>zhtX\ǣ$vP(9kK0K>B)R FzeCf;YqԷT'>Bځ]>|r6Wet(>ޱY;^TH3Ky#At-OIx>hl0GX(5ڧl}iٽ)X(30 [d$ w`2W>ϤqFekC̼/VSS40)p"19 4T:`liUAsƥ,&玠LŎMɹ\rMy8;XYB\I앯[[8u' Ѕ={nMjGzgvۋU2^~Z֣(S=JQ-wa_S#ڦ:,#joUU2\1'{UYUCF bF+v¤hZqN`"n~NvtYM:Ic=JŮʌ=Nm<4BeȉSɉ9N+f ҆kn+͂ ;2I!@BC D08AЯ^[ZxKر39[Wi"csɕ5T2t)t)שNBo2Jo?h4KGbb6Ngew9Pp< Z< º\3*heΠ],̓&+ku>Hh^Ln{5Xq0Z mNPL gM2hAq-@ u+5mUBK7bMM'Qwc}HPBJH2T `:\)ƍNBr e+\UT)5=!Hʌk͕$˸ Sm=eFdT "Xq7nRepeINۜǽ* q3eBI!XT`\ 6w4RՇp<,NB6$c]-/2$ze4q'PyoS .[V pV$o&9JCH8Ը> Tr@j4I>r,ݼa/&C;LNF!Hzw画^V<]F6]~Q$djUlN3GHȳ@%6v.l*mN;rOlML_?&~y4!\&YꉪؾY `^~rɘ&0٨=Q¿4z viutaz10mLɕ4-NZMQꋗav\9> hxz[n7y[IƓs:sGa d[L+nIc: X\tk4I'AE'*MĉZ^c]%r[g,-q?F,|7VZːLѧ=L wNl8^|TǯG'|_~՛ߎO|z fA*A>YO` F?Q/"jo57!Q]zW2[|$xЭܪq>a;,><_rZ6b'ɉ_Oeﮫn}DvZx+TG(ػ6v$+z쎘%y~y a{LKl]{}P&\dU],DU!$'/Lqeb-?ZD/Hz{s$BJE2,_LP $֕)R!(GU+!Pܤ<lsyz~byB _"SPdWY^6P4;5&J0$#h鲧3믗?9}ggߗ͉8:ʠ?V-whqqaؙo(yr-cg vy$vO?l]yp];WvS0$Fw8Xm9lgu=aKYeyWNVB0LB(uJIXs.ր Zs슓XcZgRA +M+WXl.WlyIA)0uI݆E#P'j݅>9' g=/];Yd~|G:yqqѱБ35)_K{5e/h=~^EFX-(F-7Dn:#" ̰wgyOsymtzv]Χ_t"ỻ~,wpMki=i>췅aYӗQq!2[R|3|~v-^xM~wvmy3/(}]o>9_ncZ|-!g,n1pCh{\kS/˿O&W}>ף{@- b;C4;Қ9xg7\_]NzDc 9 հF!pCF4#Aׯ^FFX5ªjsQSSn& GXJGHu$H! R-K?]=uR\Y*cC G(#xv(-J3C }.&r%[5k"ZKq*Xc;]j 8_2TqN%)V6͉.NK~co\=^rg d;۽?oSB@Odk!zr{6sRڦIr+9x"C%_>7pftJ&Z@ Lg ͑M_ OoUI˦NR%Y0 C2)bmO\۞=!-o{Zc&]|NU 3J.dUȖbL6)UY|e]C A-|MthpF^ϗB=c}2Է11ٓf0;r焥W"0KDHp8Q{9^$X,$.YgCRhU=hU U^@P$Sm+K/fUcVYZHS6EZ,3)tLA]IП#+!}:6z9[/womE-:_җ7>/CF=볥̗ ]W)Om#_zk6ȟy.]^? C͵Q›zֳo뻗y>} e?Os֡0w>de\?..wwu}ិ󂶷a= ۯn踐G"#56~\!  
]8-1.#k?5#:!XΉˀ=2bS0 ͑Ue~ػw^N#{i,{8**hrS4D6ƑYٚL %%j x"GlWnI j5Tm :6r -Vo>+$*vf݆%{J˫?ӝ/?j%U#X]_[?pTu X3ι ֫ A؃OmnbОF,֍]4 QS $d| T rJNpTQ+\)%iHR^ONvDdfM,`2FWFq *`ɱTv5Tv*DOڋԘ*Zt0ɸ2[ش%Άm8 ?41J`)_==-' [-Q3rƯW?ˏzm^yRUMg YGg &#f}!PK)Z=v1#Rc2ד`fiz{l.DE.4%mp(:Mc)ZgK)JbV`"A[u|࿔ܺ$1HُQ>ރ<E?|~ɥӗiX05l +}_M%%VI>AF jCJ)gP9;z-d f1i,}FS?Q )]^}6۞0!ެ -eoluqu,M$Lb-sd-HՓӆŞ/9P6:-i5Ԅ稵4u!R2T17B:c {KbT5{(݆ق_L|]UګQ|\)nj^12˞dN)!lʡˆ+͏Z e 8_6hN""w~2=#lI1W:8뫑y3 bjrh:4h˝r"0O'|*)_3 mVF'j{f &x\tMx1k2VGo(RvSZ/>?xy}n}e%*fL#od_NZC3Iɻ`F@49G؂1S1p|42E 6ʈ3HG# ,$V5Ԍ&ik}y*7ٱdy*i=|ک~ ({uBixmwxP`#yhBqֹXt6Yv-@ILY6\1fs?_[=ƨ>>pם5Yzѕo-~/QQl7xyׇ_Gӑ ?bl_8y.w 4OFM۽ѻkynʳ?~7b:yГ~[?^O9pg ^_.^ogGrVע(^7·>o˱B?x5o''8fYr$DѾy+c~s~:y c\jnv_p6=—!߲׼|¡yW}oP-|6J Q.fgqJ]ozN{1@ӨdV%?)K)NְY`D`tsqsϚQJJ&IY_"gkB#(߬foWNarht=JKJrA[ќܼbM]Յ2Ch.Qq6 |<ٳIF]zsxdCMg`mvep"uf%@VtPὡȼ1&l7y1醉 {e}Wk4slS}@BTЮf+'DO/Fpͺpb6e(K<M>;E>OqyY#Y}a\5:8?g7>ymG/~7Jy^b>l)c:>=SE8Qu=o3} '{|Sc?W ~,6C8:7g(+(14.!tB@XqԲ)+ ԁ#WD*)6wZrM}r @Ӆ9R^g @ \i_z5Mvx>-uyz jrǧ{t&e"Sg hj3C]TRy,%blԠ2^ ŅwQ0S9lhYoT9_277-ṇWo\3!PO4"$4Z+(`lOö(;rqXd!jN&`S:DW4}QmD1PEIN(-czlGO)o!(@N[',bcw:b, ðtRlF3tz1Ldf[pfINѤ6*~U]fJՏȋyXt"h3@NgAC񼩪R4b{_t<M%M!FF_Mpg ǹ*D&F肶4KBibl閹 Ai|DzexN3+ڧHlce"4 id@!Ri_+ҾPHSpTv\b4 cNW$F$ljbT21ԀPcpr|(8ghc&Չ(jt*ה))&Ζm(_O\]NWmТ勫)]dVl/ߢ ]tĶm~t obڶgM٥3oVbX 6*1(Sxgyꦻ`v+{Vƾ]lv]j'f-/\~fJ&1ܙoL+L1858/90nWo8j|~Ա G/?ae®8CoG ~9ix*/~Ջ'5vpz/)"yF潃,"3?ek7vK8ڔ֣g]Bw}+h_@ ]V⹻fhrkOnnH oOa0gݡ-t?ϼ^0P43O: 0~r Z^{uٖ'|23~xÁ )ʈ}J-%<ZJNdlXЍH5-kh$xI-ǰ'O2?'ŧ>8O}ZwO7ކ Ro]?n򸠢 3(OF8e5H^H3u\Dm"Ig$s;Mll e`o]@FK=+=Q -K@j Q!ȝ "D)Q)a U[k4O_Vmu-'Ip}>H]_ 10ZpW0xwS./pMݢz2,ߍ>ʪO}GWye?oGܧ7~mukgx-s|'ٳB|Y:_foK7 {rPYnnBz+!L0lS!0ii_%j'z{aWF|v_g af,k&i)PK"$NZNz}P6SU1 qT09AIhZeP`>Y_Gا*qd0 8p}$6ND#E " L9]mD[FrT HG ߒ_!p8I4ep/Qm[2yWR/0B "LZ\Dx7l1,QFs${1!q{QDo:$XLJp*8PD\PI֌٬abUºPU]4y2#9-&W?NVw~wAt25v BGiLh$SeY.x@pNwZ(IjlnrT,MW) :mGML(J>&o{kblntLCFǮZ[ںj`VCIrץ4&'dr ֬%ʝ8FcORi/ !2hϣF]"A0s+r>h^Ba}XL5g)Y1F,6>vՈF4U#V:dIĢ(KB&2o X@E55BmRKYϸQEE5{pTVX#gF|wS;ԋYqCu%E[X/ڪ^\7N㠵Aż BtPKJMC5WR: X4,̇}CчFǎp1⣄>d}`WE]#7z6!W[glW(Q1cXG'OoSӽ-I,i4j|cƧ\dʹHA^ 0^]PN=0ȝd)N6:%QB(abT[3F87Y- "5Hޣ]^[cMw稬~t.J;}YI¥,4sX+ dhi )EB3Z[*3Re-P\Vz0`JY՚1Tɓ{(kx'.OotbZF gs/R685@AD&,$/ ǥs [$)NxDRBHψZcO9344K1q6njCjݼZ+_iXY(_JВEB>5#bFA%珆%+caF.O(3 J((_!3 cZN㫧R}joqAFg{ 2Ve.PX9rڢ& @I )h2|P4(\\$IJ?ͺ '`*Duu 'Y&P(AT:]8[Atz hn xz}0Uؕ$ٱTڡ%B;Ўem3,j^?y-S8^M<)q/,VFQǚ$iѡ!hRՠuxNIz8n'YVmL0gvCF¥gښ۸_ae7u2.d$uIK\*\-(!%ʩOc8I"GXQ4 h|D:9e"hJ2C$4)(KbAft ̈́RG!p;m<832QȄQ MY3yMؤ[/k2%]_3?܎|K>~.h}/Ƽ\3 >,R7YkSgb6* !)rQ!Vw uQ8b|A{|qo۞+.g LOf JJSt6~V*hQ"K A\y,*['Q2!j4KXžuW`k2_y&ԥ9)ZySIpk >}ښzDUuМ0,U&0S&kTf Sfiؑ@bl0σNh^)B4"H t.m?J^No L (Z^d?n0~p;<%8|NFG d$aCrII)v<׮ww:Ax}{>ԒD UG'$:st|\e8p޺ ?]dL>lC })SgW3)(R #գ AN䋷~iY>>Ŭ6>q&Gs3*YSvgzeߓO~.?zv0{ԏbF̵^dnBp)"&o =]uj ,2R? Y̻߻Nxg{bp{eduAZ7֌$0qǩ?# u83a@V4*'~4O>o.޿}?\矿|鋷? g&|5 ގw-XS]cӮuMuUM߇#[:]?rKcۯ?ûg<߻)XyZ'#ܓ;a$zw|DI?S+0J|1zBt,Q0Aȕ"M:ȘRl(0P`΃ SW!ܝAhm2" '+=.4\Yäd"%5eiW`+ʁ4:)N#g:xs;<xӦxbCxSʛ7|:|e3v :g9sYdĥTL$Аf|O {DrO* frj}Ay:h:q4FbUϏ A8BUs42fy^]^u8l̮;,P︊7LI`XQ4r '=HHT:* o#[YoHMy8/D~ vd.y:ط̑` grwyVCG2T5"1T#RWH0#hU&cQWZ]]e*뤵ը+A)Ǥ2*u,*Sk޺B*5+IAcr`}De2`E]ej?tu4UWQ])v=cg9xZrQ]";]ՕqdXCĹw�3L^ 8=\whALs NVjǭ( i7d|f=v_d,¨I5! J!J).0D>d֬b;pUSB4|"ZwoN%j9wX# cC*ɔM3u lz}C =ghY 3>lśo@3QU@24zyCʅ2қu۰eӗUw̻UW]1/NxЋ(/O*pɃ0ŧp%?mŽzkw `f+R-,-"T'pfT۰Qh!ф2/]R Dzh!Smh4Z#RWH9L.cQWZE]]e*u#RW BѨLNRWZqi1J[uͨ+қS41|YSﴘ+GTQWϣԕy2xAY㆕+Ўƙ-Ò|A,7 >X.K,ldnj\D /KQm]AW!;c|2|t PWY4y~yw)H?uFpY<"JFhA%#5cZ&ICtRZ(A.M&inW 5\|n5=I-;ۇe0&ay ZOq޼}DaX|LVgTL שr|311ȼ>]H XFe1:#-B2cwl![OT@|룢?Nۚ EO&`OoGv|G's|;-)u%-'w>8mfKyD! 
ב8D.'Ak欇֞h3(>+ d|FVPX`A1b(-f a C{) d8 HS(2n) ee)חJ("(Ebh18{뜡DB0*QE c F8-)#g y|H)ܫ۷ 1[󐁃Kv0Z k48soo|7E&Refhڅoz0)Y공\IxbY@o6 ɭ^)}ʨ5Q.ǟ.&on?~;aCz]z6,*n]UO7^9kj~ +ڛ~|+;_MDU=so+Xs:ͭA㩪 %Pε_nZW!-zn-*4 L4}sJXӔo^{ ŰgP`% Sn"Rl * 2U``x.z/㽃^I/%_5(,̉{N (^"`(t)dXJBQi6Qmvfunh9U "=>[:w{=x>K顮練|W9|ȣI*2},IZ TmRkL*(~/hjQdꄾp \Qeh1PIIPNp/IRe cgg+u=I&`ē76H WƁ>PBZA=N#`A  ;ՍS=u^-[MaΘ)zi(=g0?Ŋ#ѬuP%mB,-Aޢe%%WBc4QY!y.4JQ#Ei pgՎseLޭ  а4FΚQ&!I h&4T[S̀ң5wBzf!8d2OCc;_P|0GY6єc HnaJ;OuRs; fZ,D:#ârc* d'Ɯ:&H,)J'qZ2|T``(|Z~W}~Q9uV>X jO/vtz~8P}0ݗ 2!SE|⬎n^9ocB]Dm?="l>\'o"F5!3iK1n$H }]m gwLoYsfzk)낦KzZ^_%kK1q:tgdJMw?zu~![]RuJ.XɒC NN!ݠ3VptZRV*1 h!rmԿN{9m+~L>TƻTefRISODjDʎ )Q6A6<@xdg\xAN k A-)J$RDN+WeD'ejp {4, # WDPV/Vgx$ ې, tX:7(`JTU Ujm+d@l$Sƪb|v2>Ajz1΁6D.n -r!`"0B5P PQ{XRr,A 6#N:G!;OU0]| Lٕga )"򷚬npZZ2̓ ^vzH~(Y ~Ʈ]6 3(H 0X(a'D %#@ DEL`k.'`0VAGYDVe#oPjJ1Mz+Rpή{ oP[A AU ~ފ祻,a bJ-ĩ FSX+ٴviż^aeeW/NC@ݨJGB܆ s0aءǿi EN4aYT-G(Zth@$ `iȯfU(3h7%<%2` rh*1l=#F@b9AD;=hP"X 32XAH` H nQ9$di)ʀ)7 !KP}f=hl3 'jSH"VRe 4iWķ'AzU C {PeQ0RT AG 4gԟցi,+~(]ڈr1AT48vTvVsgjz`JQZ^r+wS !W(-(Us'k:E'Gj(~n")"awUX6 #opa+EX!FH g@hBXN F:L@٫8A=觶XGޓgc)kÝL6_9ed$`VM;A$`N #&!Kk\*?u+ygV} @۔zq!w󶸮Mjg ״*k=E[;=Wd)=EVS' ܅\ol;c~鍁gI!lڻ^(}@hl 56hb~a>zULw:1,Ts4s8SdwqkJUl潍>5!B:0`I5h+wHQnI_pns̹ƈw/)vd}#ts)\rlϭ׼tx%Sr-@ R7-T'Fx6dnNaW)l(sG^1/A\}׸zӡ՛krNb1'0Z?{L=&ccb11{L=&ccb11{L=&ccb11{L=&ccb11{L=&ccb11{L=&ccWo S ϧlzLZ c{L|=&ko1x1\v9E]UM[i9"?:= SR:cG;JκXN^V&6p2e?ׯ-iA5|c0| q$)BU˚$kW$["ϊ ^)!uzq4)M~Y?;~xwYכbS`G_~?cdrM5"25 M6Z㴑:; ƍG`Oh2% Uޭ| `w'7YRJj٫$Aa9`y*iD<4|TK0x.q0.jz/zq1+u*׹ W6&\̏}ʆ@HBFS9wɡ3 ,.V=3"ϗ<5+f*Y 9aE%\";3[ҌwhGy|~tA៌ԇ3K7 ^׭z5 eDU[ArKؚ~~>}Oޤ=1{[1q`2t̨"EC+Y:n v#KC8fwE~^W_8ʓVZ]3My iCATK$![haa{cY/fEޣIW(2ȼ֪ըlx:A"v׽o3mjl0Սl!Zͯi{ȍW9vaQvqp|1]謹4gurzVB7'e1.|y0}1n~兛?Z^5y,ڂ듮=ۡ-=fחQp#FpGkE\[/)b qńd^Y~Y1Pm_ D>@[<ɹm Z.ݒ1賈=$ݮG)rⱛIp[8W;߸ϵ'pyZ:|"&y}souq:<x2Vvo䢖].o:_fś˸gͶ:mȯ.]bpu7BrYzH=jgؕ&mr/ė``P؄]9`Ǐ#Ǽ]3mvc8|Zfhq1t`ا.x7GdZ/COt7'޾3l3r[ufONQn۽?~,9)vs$مedW7 \j4h[[I2ÏG/ܝ_F# fOfrnI?ݕ0)PQJNc3{ s |~Q]6ic i:L!r~ ڙ_.]X{5~id1<8*j{4MBms}.wg`e˧dԂ}Uُ. 6)0^@{ԁqDsqn8@J3JJ5QUAI. &uc9B|Tc!+" ڥPiDmiBu#A>DOI[w8Fn,E\ 1|Pd 7x;GrX8ɂNu~D}g:yKg*N.jRZ#Nb;O_7eҒ]n u>Zw quS8lb+xmHa{إfuЧ2xuT&ڠ%_140TnT?x@׀R"DcX -hWߊ1n8L PԸE`yS9#ƌkNkaa:$nʀGJ pq{~pFqp 2#iF7JkhYC~Ѓ& j N7;c<\JRvb +v ?dA[D1Z\:y' C:ud1]9M1QZQmCJIe}&j~ j oUa@?F fKnl<|e7}} ۇ]([~89}ׯ072)챀@9bv 4haQF})bZeY:;4ZK֍V ΋E%a@Y`7K SiFeIpݰ(!A/{ó\zȍ%sQX!RA AJ@r-WDi9MG0eQO0+@o؊D%,trwp= 9N?u7/LNT>pNeQ0SQ*FrGu3Z`p E aWh pN;pc +ZgjF=ܭQ[aP}̭CxR  pkv}dsҐRvO(A Ǩsp5Ci1.0\,H . g@BXY>2 N F8@8N՞3P76rJlRVAvij"|9Y6ɮAT`/g 5AsW_ກ銇@ޑ5G V>[OOoQB`v]Q~wdQ})?|];pSwjem܀"Ur6[ߏ ѓ凎#Z%:o{,?}5Y癛'߿ϲRe<<+}hZZ- 2RR_eU~ZȾf4Wir/|tݷB/>zXgg{Sxb$|?!N/fpDpNU ܗ/]T%Dd~j\bwQ3y2w)5=m5Iylw/U\3{K_BF>-EΩ7Z0/D%,$kz u( 3xz&Zۥ}U??.X.u)/ߎ 9>ZaZRom }ݻu֦~?7w[)'{=c{Z&mc'OPZbXW\Fdw߉7ӼᷤϓLd-{Kޒ-{Kޒ-{Kޒ-{Kޒ-{Kޒ-{Kޒ-{Kޒ-{Kޒ-{Kޒ-{Kޒ-{Kޒ-?6YO|AI0b\bڔ^{7 fKOLv^]s@.>sBw л[6&s b]ے_#>wGXR㏱n0vv;KmeH{9X&ESD_rI^+Ys;)3Ψߛyd+}Oy_}N ub78W뾂޿$>ywrݑ exk~T`ݙK&*;|1u4u83qN3P;gzk{\p_O'pp*'JaCd<(ޙ:87*rʋ&U.]/Q=0fm IAe"d8\=躨#Ĕ\ 3lDdf8Liq,X6,|Q,HoʌߤKA}h?XvZןO~Ljݥrq9U5{"b9a۩aPW_NaӇ4UW‶3)GSCو=͜48dԎjo {=f*w3'#Ydʀyc}eS`7<K< ZàG`t25#0 /*5K2%20.DkAiq,"Ɉ6D Hz=;dl!Q[k)y&:@P"8͜;?.k%by y|25ݼkD pj].) 
jOeW̸)c[w =lQͻ1[CQsmKQwmmH42;iPM>d( nJ^g!3o-zj}F&^ڑH\ɪ\{\{IH\{\Q] QL5l=$;/5)B*T$N !,'v.q8}3_,,K^q/tT/ ç=Cgkz݀Vb>$"^v|LmvP`Pޒ:2j)RǼ!*"n1M:m%ߣtǤKn"טT;DmsI@UȉJIA4B, hƢ KdKgR&MV6푱9+>N \ [ f|9q]݋˻Mh>)aY{Mg*?ɉ{|TO04^~9 %h6 IoS30gq] G$nn`gjXF1mR'YW A#9@GS(4M8f ( u=W  1U[lB8TaR[q54fPMw+o認N´J׋ViNNn"j |;~<6G^wA%GnzٵIr ibO|1R>fچ1k{tUl0Rsm0ٻݮGx| ora!ՒƝ-1fHg3 fV'{X>ƭ~7bǣE77m-c+czUdW]} hcjaFfet'.= 8VIm#\gay my9ahvrSu闷~%ͧ_?'Lԧ_'xRt"ap鿽Msiho4U>M󜽞+svݻˇlufn ҏތAK=H-7'#ؓ+ 6,YؠT-DFsߪE>fG%wUU?_l֭p"$Jhفk9{P5b0xDAJL(8`Ó*nW> /=tRD=ha&{!bɄ=TR*JfʙN3&L//8};~4ΐmNн xyN:?9ﹱ=d;Ό[N;̪,i; ?ڠo:fH 3f`ԖS egd7ر'zx 2.Ĕg91I92Im&N<ْ uT=)_G޼T3Ͽ{S]7;ϕwۯʀ\DqJa0FBL2ZN*PVx,޹3ݩ; @ooeTųkV7MWa27~jgSy|f89CJ)RYb(U6(HwG' M3aASIRSg8MGZD5Բ /L4tk14݅MQ:.t{0MQLg21!'g@^ĕPAZP˓X^NPL=z\ urԕ$.}m 4Y"\~=r\?_xMj%}tz']Zx1|3W<j=3q=8ޮ8,џM<nx 'VO᏷&Va;[ug#Ma b.m?FyF ż~e.85H}[d jHrgְْlI~ GEĦԅk% J3%<&%{3pO^<s`"Z`θ=0=IJ.4PWZ=LG <4BwZ D佖豉hj4'-sO\*GOON'gNC:``-r+K83J*˂6,0n#X.$b+ jxT~8H$*pJ˼P{ϓo|q`d&/ɞVI/h:n;С@0'? |ڎ'%'w,vGeBU+.EG|ph8X+SF2pm$ ҺQa#RyMd>Z`P>RX{ȤoW12 P`A DPR5c6qvk|J6]mf Ef](.|T](mz,Ŷz7|asݸ~j3'{b1{"LXiI8S$jʨoQkI#2J&8[lhdgx $ cTE£Tpny4^3#PE/>^ p{ٱ>\ȡa}*lYu3 y/0,G\|K8`8c\lƌ[flQ7Uf/-s(o7ɏdr3dRq zՒJU7%/Us6晘0Ӱ\U{Z2֔R[3E ZCt;!c.o}M8jf ٺSN5QSZL4%[e"[7p`iL < HOU *fWNQ%W`y #ЈcD2Y+냉wiʈ8e FQ4 H8Hgvsl݉5muRsΖ \n;[v5nzCk;w6=+8O;+yvt+F!  uarbU\( $zf;i2 vh)hLʌ ; %bp'E}ԗM+xx<]tk fb+J srHx.I`q8I\ !Ie@JtHx Rj!_GJw|TB&zrHֶ™Ik5$hl {аs'8aOn;]lM:7D7L!DĎ[ZRp<{0_))F,p`B60/.mv5KˍOmo=z VF ΃Bє # +q> (PbL|P3(#4a a DD@XZa,<6 0N)>mzd-`tQ}mwp9=nZwl̜"C"ZxU< {ֆ ZE#=i#4KuA-fGpɣxƓ/Si:?`[%]ioG+|Y{>8n;X>-)Pe!)z(QD4k~)C\FK ȔuhDy:< 7LlX 8XsCc-0~U@x\5b af{BRję^=(37Kz:Tkg]0RΞ Şw0tl86>fhF X'Jm|=ŭ0E/?7*un?A}WvMyY|S.ӱg/?T1X\ ק;UNs4|{ 7׻nMgLL=/i9d//&W_ â&2ȞŴ§)_WS~w>{ySϮWO?Vׂ(_ Cq4ҧ.̅NY,`R0r Gў}VKs`ySE]lU@zj ,iŗr|5YH]W\inPYP.Whit!EszuҺ ,/ňL֗5ѫpʛ@ >D&tb%_kجɷMH^q}^^gcAd 6.وnm,\IiˈHWi=dǣ*^bj=ҵI \3ޗ%WIs%)J/ R|A/lVݡZCWV\E!ǥ:ra LORźCGY>=E 9{oFM:j4hO! *^LǓI.rcVw_; <$6WWN7K'(y*.'_^+7=ү=P{ jB͵CQ2{f=  Y!k頺齁lV-޺Ƥj.4yXzMCGns[o7 хw{-r>bTHE,0JL`KF(h(Ƶ3" r/15VpA8Nާ8ю':a'ThRģs5V<[ƃ2xM7a @^u tm^z <gv ^W2[sPJD?+=hc+u36YPf0,Dxln&97:Sf2ہ93@ݙ/bRn}<_v8C̮ JT~o`.-z چEzMW X<,Lʹp{~量^vb{L,c05%a0󨢚+&ڐyOUavؐn' 8>H,EWcFvS-n(S #$y*ZDDyjwRB*꣗T6aISu+¥6&`:Qc({ | \K׬GgْaMlbEhqdrR2JKns/^BW7LT}ꆿeXj4W̪܆J' [MMXgǨ]DJ!)֥ Ac$rnÚ_p!oR"ضܰ;"ΔTD 1f`g) , (O+@2u=*!0 Ua6_0Bd\DK)Y!UJ:S c\L:sglM[Qr)E^:*eb^!lQ K c=etbK1pH/n.,3^[[V93 oA#!(c%f%o#&3fq5j11.<=ٸrwە2Rs⏪@|/˛Ϟ|}:{럟7ip)ZU"e/VxMsU^4UlyO]w6.ˇ:k4fn ׫FARL|^wOH@Rh|NgWZJ"=W`—*D+n勗 L~<,ݲ}>ZFw#8Ym$0`$AFY9{m1Kg" %b&bBczgý;!: ^r# $g\Kr1+kb& 'p2G<~ -aB},}Ahad f }ȭNWHF|I)֫r!}髑BˡM))/eI "dHdJItzi-s؄tlV<{]ʠ|ۺI||3mvo^\wci1':Nt)HRM#6v#v M5s ڻD[rG :m9H2 Aȅ!K/sO,AR W0fff.3r0It06SyG8{z+)#Y.}-_}]wg[ s}odF }" raw:}L|Xfwt=w=Z}Ҩpmo!(zu#,#1oI;j?iݭZorӛ6;h;ٺۆAϷww>Fa>mo}u;z}}2 osV_1t"ߌ1>N{OL㯛 G%u.l:/3X7[cؼ1S?u6M 2չLU1J5x.ľ N%a;p^9Y%z?zWf H Y+"CfWr1-&2C`!g{^yJd$D%Y,9LTrR pFx%x)xޫ'zOB<=Y֐Y\$>woz;ŏPwq%eh!g%Ri6!']!x.B P\%)Ξt2o9e'_42l#.f~aW®7|=^+ BbиŞZ*O^M п 1wLС Cr l 1%wtg<JnGyU\LidnN6Rk pڙ\6%&p5׌gAez SuLHeAYc+3Ptɒș"GQ 0kd@5K_Vg=A:(ƩXjP-"4,Va>FluAf Aod !Kh! &/`g"8MrV\LA% 2AcrK[eXM=*Ɲ&zՁb`=jP*Eb[+iФ̗,5Θ:*,2]<]=6;5ߓLXE7q Y1l kяu+^xJqHa~w aNlWKw%1ĆI b#X yڥfF) %ـTR@NQ ! 
j\%FW5Q4gT( W!>vpBr˵U;qV4; !q)A2jA4l">+([qLrЄ{Tуw b*[+FzRY3jG@ b+qsW(+9+|%di9p*RmPJ"E\J%1J"$U+k/V/)b5ɂq:4w*Y9$hENhCBԔLs5<͏m|6`%LL˙d5O#4AI;Imc_G#XYܸx\۾4~~yӴ_``2xl)Zh6(+ b_%f9%eb?E(sy04 'z& c"XSHcH&s5/fwㅇ+}A|<ݣnCnj׸DpV†8ĸڸJ*(d!,eWBJCWRgcDK1<5=|n'EANcK\27]dfɢ[1+('B4SM2?cn&xʖkCsM&}Gˤc *JoQ4,Nb1L6MGk15 INQmdwv .HF(ataq&I1͎5Aicr5׌gL9k10~:_S\Owzy˛`YY[JQœ./'?].%Dyr;ZJ,OߌI`حOEi/'}ec㌂5oppO'ި4d{Yzu_.7cbyXO'ng\\h{ױC첏7~Hy43|i5/ƭ~?[7Wmg}eYR'&x(p67;?6mA~8qwԢ?.+:tٟ# qU:&0=Ū[N3N^ thycy?.F2eW w51:A*'&:aD:ry>X!V2 .zqKƧg7b0Ml͡m mu[2^w;C?+`g!bP).;I5r$'8L &D].φIv3Yv)y%PhHމx%-RYtZȺF$pȡx.rH@Rwdk'!AXB#s@'_˟jmi ;eS`s ^a!ꁜzLOOSt2/j3q؜3y`eEIB,C8FkcLecAbF+A̸O[A2^&3!J]ж{$c$*,JpQI$gt k&Ξ{7tB&FF|,v/fƊn w[ݭ!Syr,n#N.*iÃKC1dap!wƵQ|uڱ;"ϔTD2'T4?KI%`#AyƪCcaPIa(%|t%BO\>rP*m)e.^ ]k54iATR^Q䥣2Mc[d U!x,'IbK1pHvj;1m= f9ꝷ"rg  #!(c%oK8[vL ͤɛO:=̲U z` ݽe̺eX -%d8΍Dt$.??8 <(%C?0RgNaHD0$Hg:S1itx /- UE-6`M@ZX%U WjVp[}*ža8+cqUZ$~ W$R2`>M\J:0Y.lT? u3QnITM\&3_9ӫ/_J?=7_z'Lԧӻa ,8:{!!t}YwϚYS5ԋ K"fᄮ-پ[ Ecٟj>GOH@`LŮ*AliI@%^(1}xd>(>DHW[m5q[1}>B{p #מs#ۀc0xDAJL(86,)//zou 8|}oT0Qs6pЌBSiҤgcdЦXEqiJLh%37Jj]oe6q^L]׋NS;{g\+tCVv3mit1c`w <\(-ͥB&gL\s-r5aL5 <vC`i0F% tT`Ύ):NثnϊM </MGZa&̭ͬR$FtL2\p(F%«_GNKcT` 9a3f)m4U aU8RJ9Й+ 9S Y Vh3W#846pC.xFv,PYgg4I#H+l:<(婠B蝉0H&p:A5VU"f 9yxt3*3cpg(6%<#* -e{B«]Y >GᩳH sɭ`)$-{|+ĵ7qT(NΝ\>ڧA*c1bhx.=t6ւ{y@WG/btRQMS1` 6HF_jA;@{- )UbQSoX$:Pɼ[H;xyoCo0^ʶV+nX{VI1 7uL~%v@,5% -@# I1*S }uːVoku*TWmuj&]}r/b$EXqr-O_/0siLZPO:Y6TrȾ/xL-:q)op~ *tZFPl鶴vKpl[u&WP,Y470`/*X^r+N(O+L*:,ㅵSV*ɬYG ` wBho&FJ2ذ@y㪝=O9DŽRnՀEmrzudXDcZ)"VD ht|?t|:"Za?Mho=Lgq T'I蟟M|j5]8ZEӧ?=)'G=6wUчK?4J;G`_Mp\~߾yyZn2rJr=̠_,,Y1mYgIPO.D\("2p!  ]|ݤޮxt[wt[ nXݺ\R§{;o2eC-f63OӒ|{A7Yyܛ-@{<{}n'k̮F x2O{,_qt3)% XUES *,NW eQKWBZWQJpo ]%tPJ#+!9Wn)”$^b-ՎEep_3{ /fѬ|CmŬu S㋭A,"Nh{<J%Xj>WfϦf* m:PQeܲ t4 G[\0s,Mƃr\Ц ,T -/-[Ƣc\Iu+:v%xuT]XSe8" E)Dl?oOް/WףRJ?ycgmI 9 >N)bxЄ~R# 3mFK/\5'#5/s $Iq֡Sl6 }b<y,![n5aNcʠN[мI>UH%j]BIEށI`Ǣ; tӹX%b ӣ*،6)x? WI%%U,+J]{rQ SV*ɬYG `z[꣉ 6,)w g:*_*y$7LCȍ󯬃rhtῧp㴄8[֑aY -J)#RHD#Q3Imڥ?4&pVNܛ-.MK񩸃~:p%ꯣ24t᭙]-ҭϧ`Fّ XKw)V s8Qn>6o/ӄ᫙t6`vw8lPsx A>qE[ OV- eG+ $¯C3(5J6r`|/[ I`h ;LR4*!BjvY*JiGה2^S(Ld_Ӓ.פF3)`0ǫ͸I/Ƒm^.@41x JPWa}1JXt8 +1꧌ =}amZ)Hs:\uq& NK|n=cLNؑZC=lX |0xz`PG(ʤs\DKӝ KR0 D Afu o WN Jō9J ` c DY; ZDl hø%LFb)XK/IrTB*j+>}I ӎ/Z_@V۞.I Dt0^eU>-,`9wGoagwu[uKĴzLzcѨ`X6hiP+4*T %i^?R(JUR$ :e׼S}lAzKȘʙ>ާ ŬQ>QXCj3rM퉾+tdI9EZD#Ģ%UDMpXBCD:HXG"\J#`BKt2jv)<dR5b28wٴFW ~ I0@͗_ާ'wwgQлOt=ͲàJ"[:D%8⚚gzȑ_Ly\9}\bD;oZ/F`ngNGӓ]GՉeX>qOtFgS =!̮^lۻl0R <llu;x2:񇷬~ ĺfVfuOQkY+,fOZ#XGb}/Ӈ*CuݳjvHuё6R;6>\7SLʠSy"٪z( Z։gixy'vG~_~K߿'q#pM'"o&@u޵Q]u-vtߥ_#YAֶߖrm Hw_o>4Nӄ?vrq9m]5gKTWlyNWXl6f4or":t[ջ1]]iwe(xe 3wO}4$$ڢpTH̴J#1$ aT9B "(6HI}Ń ״/vk:Y įrL)Tu 0+. 
8~M5)]\~ qt^Ae&O9}'&>x`VY29Ӵ`^k/R͢$#sҙ8O5>8v((]AtdPأ&"+ bmerEzuv9XK^E[$>kmP%',%"k:$v0jT=}ui \N%י<7{ }'[Pq3}`09/ HJ}?hx n!5|.CS F1䱉 xCN,/bܢ}.ڧ_h NRb`!Kl\:3Ζ1FPʐƓ)ٖBd^$ fϲ??B'՜ 1DR`cPә8;TjyXJJtײN/\fnjy} ~E9Pc@X{CG7@T+!P$h&B `ri')#oAF2"χMw,tix}6\ w"-n=_]a7k<xEHvb$u/3SgQn}|AY >xp`p<ȭ> {wxi&Se 8qڭ@GQ 1{qݳ ;0-JFWԻ s-NPk޶;=xցƘ gvTJ GrҚ XΛ!ڃ}'^R,g[YM?6훪ihߚgb1RNW~|GJQ1g7^XnVXYӵ`rD&"ju6(bF%43dMA\0Kc*2'Q%4A:(ڦQY4^j7eՠ"Di8Abٞ>jr\uq0.>W'-X:Y`uox:jB@2ĨIqbuF)gOQkH(<9o';GoIij-zgRDP=@&h+s~iuШbg`0Y6 _^5ɻԳ= LP}tg#:Třz'UP2"A)GP sʨE֞4fZ,(t#t{ɏ{<<{枇NbM%ߐe>m*K#?@.ԛ7Zߞy|03Hf;Y&x̢\d(8Җ_ FLeږ::pf֚(MdRk=\ic CYZ) Jxh:ŋIkӌl -]dRL02pFa& Z=蕫KGh$xb^yWKP,`:VR>ۮݏ>Tt׃WUVP Y|,ۓ&tDd+3-9##/}A5SMhkYku0(7)]܌9XmF TrDc2,Q=6C#&j3ٯIp`yVK$efIQBƜlS4iD!5Eyᣗ(G;`YDKa0h &ZUՕ;gGT9Ǵ\xKj z8c/lƇ q2]5r_\4Uح mϧ-Mgݛ\^m(tΩ%V 깟7W}zw3{s.:?HmYW4.&97եj.q뮄xM6=/s>]uޡR̭vsk^eq-c2:KM$t r l ?Bួ=n,ޟ%ўdCxH" '|h'M@SFj.QEcɽM"Ab Tԙ= S>f¢ -R$},G,"t;gG~15W_Y>SOjw^^[;=zrEݘ_tR@c?/#Ycc!]*8iQ@Ml ^%%=טŝ{po9l>RR0evRNK^Q^XIjjVq-zF{*}Vo+Wb*YAHQ> BTv  I92b}`mO5P5 /Mi &q9\; m(+o纴ѯ%kWݺ|,JWI(AYs **A{ F'@cMcHszړru>r g9;`#.8[ml§-L{FfFo"@I`B!0Sw I( $hbh2*L|WgR>VWe7p8gMh^ج3I: ئK{BZCN }X?'@F@eJcߓ\#fܻ~,̇.aށ^=o<8jwT`\?v| &D E1oT}t/'DZŬe-&HA423)M6daQg=R{9˹eSN.s~=;f.o{JÚ$ۢJ*I rpd.)NE;5TFiJHdLr%(kb6Z[cCLJjOsŰnH\Vѓ{[[Q#yq5tw;_ղ\^y\xcJUA&hEʔE`({I22!UV3qn!\b5mEPK*#嚶lVZvR]{١mgX3_ؙf<ڎ}}^}270ceiMGɧpz{lʈYfj%m9a:Te1h9g9Z@X#=< aVBǨnS[,D ov(DEۏJ;N<ݙvϸp)atY4?0o%H eP3RYd&sVKQp&XquYaWY/`ź} E; 69}ޯ#EU(˽E@9Fe@] Gm$ RIZdȌ8CCOF^̏ls)t-*{;Sg *4- |<`˥rJh.(8 > $pIJacq%  NY&rGX_f&0ȼJY{ILN,,a\叫J [e(˳ y~TSewtkJH!;WVmÝg Z'%ht.63MfTpYp}aO7Of<C1Q&~KMT&2z"u\Q`2AB Щ<]G;=##Yosr7\n>ds-^*)Wh-E ^aw6ևIf>4-٦GCD2쮲@l";8Nꠧ/ q*pF‚&(hH 6JB [@ra]4;|-ZOH# 4U}-Zqo/^]N#Tػ>?x!$.Eyя%L^:dA' ZQ"2 Za1X3>9_J/WeVa3|xW@S\G 7ɗSa.pIOؾ?|^jeUHLO%|yQ*S2Jγ@2Ae*GqT vhy&?-VYZ}㼞\8vG\זl!ɟ'$7o0PfH+%}yt8Uӯϓst&ab6NGsVʹm1M38sZ0-VRXl5TkMm5P9iIC U ? W(גXpjq*9puRZIqZb9Ti52lh"RjZ,e"!BrԷ_%l~hy SF~J`_~F0"4,L\% V>DIՀSĴ&ڐ{'nvu]rݞL(ƿ-=\&!y7Pvdl`_t>r_)|[w_D;XPrU%~yjp̫d6YsdX:t+OgJh[sYZ)F"L;1`穊UkKԖHeHd\r~˓O:݇mJ?+w)ʮ7,04PQf2i$˼`&Q~1_jL iӏ'F&W[Ya ~De<7-ԕM7gw~_Aٙ7*ʼnܨ]f_oxqM8`BN­!׽Xʩ,,Hxy߫ݵލK)p1U3> yD2LRQW>|TR2",3ϵlx4BƂ+P{{ U 6J6zyN8U+;8\ukV*9YWrծ]O=Z FD+WVWRW'+ƍ:"\`KL4BǮN}Tr +iIDJhprWKOq*puB(c#BE+X,BB:A\Iы7 }"{ w\J)Āĕ `Fd4B]Z)+Tg$q=N< EB6}k-%GOTF6L@1.5$$a݆EI S)M Vՠ\5j-5QjNњ0RLm'Xhpr W~iTR28?++5?@Rՠ\MX$դk5:dUR ^0[ I';餝Z{V*3ZJ ڵY7xW Q P. :*j\JC\ RpUǬŴ4jjj]r"6jdRU||<[X~p>>P{ttء'KdxEt5Gp>.P ]Y.ٰp]X ]MNWgHW|޽ѕbZ h-_OWn>!]#5{Z]MQW?z(m7+q:"cZ ]0`DiYҕwI8ؓ&ox֧c2&HW!yR&X:c:sE|N͞T=Gt=Ƈ5t8W=؈m8ۚxE7lhm8m%3," 5 2{eNWHW5m`y[w= &Z9zu5Qz]UzЧF ٧WW{ξ̹`hw~(4.AWICOmZ{\&BW@jj/"t J&[|~y]Y:3C6y?e7\Oo>@y=߾Bew`ظXowMv8,b| os6 g8st6gm'^NŔ}&t|'0Sw}mI~P8Rog㷄񙬺ĝxmlT0/@$P~c~- 87'fg0?} fccg>nn!Y!3W/H/z{o&o?N #S5&-q`Jl RE(qLajx{guJH3-@B9AnR=lm\{3ť&2Dž8a$MZo/a B½U93:.4Tm=)0J@=,RFM}^~ $R,!R[·!nvHdWN҉s7f9rb> &3qX 9q1ZcF-9Z󵯀pOD{}kmڥb(!#0Ʊu&fK `?@>R;J\nc0j!fBT&1#FC)$(pOЈ$:_߼ȵ&+*-SY(/`TX -$!wysYU)^{r}ϙTR35.E3ɍD%[C5 :XJ|X@{"Bdq=I1c##[$3e Ȩ&/M(-ņ$k|`5 jk}ɈTp,I[I/-@d/|"OyZ]dBTчl0uÅ*ܜ<HtA{ b>wB >B.@N`dMȗBф i() CAn1xF=J: o0O#4/ h;b9J3**6' šˋ:±e60.Zb"n$A`*.t-c6taGixu lGCVj;xXUAp;H@R:lzdF)f1Tkw˽AAqU.mK` !x X!J ]*M̆eBGkl}4sB:$X(FBťEn(pJZ25HQcmF2YK.B@FwXEzjPB] a@B!ՠPw^KCw(c)(b DaH $,( "*ZJ,՚Jy>:lAZ 8Bj(C͐Ȳ(g ҰM6}EՊ{BufA=aP-]/ĥi& A͊1*LӉ8!a%`/;LpZ/]ӹyߞϏN [ŬF]`"AیZI|tx݃K<`KJ?l:@G*K\$];YU&:cxꘆ'z$;J@ .g ) >@(E&rZcd^[1P>DZX33]ZctX=K`r@!Y#e"ԭx+$@f6/EUd!:T?5y'*杘6dQie6A'nˋ }{o_̻5M6U#;KTf5OMCPڀJ5$[3W"XH-w;$a3.0;T_l.:YyaHZo2S@Kel88//^o2图\?+,90tt!eO`3=8ؗΦAŨѭy5DvԌ<R 2j31@9ѐʌ4M\AI'? 
9JC6G=T3ʍڛR :)j*2T2J`*(Hȶ,- = >hJyxHӋ~ج7al쫀'v+EĭSN bBuX1w.P j Q Ge^vE5FHǥH<Đ<R`\3c][cМ.H.x*fn1HAuЬU:6 |dfevT HA|,ޣ~]-w<58ڰ1ٹ|7i" nF]z!@ւJ^k (B:kxQ57bdzlu!~Z)AH@5>dN֞JOQ!,6:a$قn- 0il4fs˥:pUY.iBC,Z*c14&}l+BER!./SJ,w褀k5c4&TgAy-F(GoTv7^ryy i9ioݜrt382Ao /O[?8}?|w6t>EV˧ Ei~oڻ[F7}W>?ow}տy7{CCs;{wgWqoyw;[DQlveũ!Nn|q17W=F΋ѻbϯg'qy}ozusK?:KM^w7Hz~ݯ{߯LJ8;AL:fv{}8G7m}ss*3y0R8< o^T5i}5 Wh-67&ʤgi &x5\J WjRÕp+5\J WjRÕp+5\J WjRÕp+5\J WjRÕp+5\J WjRÕp+5\J WjRÕp_p85=161\1+2\!:~PhsO WjRÕp+5\J WjRÕp+5\J WjRÕp+5\J WjRÕp+5\J WjRÕp+5\J WjRÕp+5\W5\%&G&d7\%95\=CÕ7=J WjRÕp+5\J WjRÕp+5\J WjRÕp+5\J WjRÕp+5\J WjRÕp+5\J WjRÕp+5\+ 8j Wi-+ wLGj(Us4\QƪJ WjRÕp+5\J WjRÕp+5\J WjRÕp+5\J WjRÕp+5\J WjRÕpw#K`i@uI & !)Hm }k:("%vGWUޫ W톫vUpnj7\ W톫n8cRW7!8mo({mxI9.Sh0@)L?lzC]mg]Nr4.bܔKoFj$FY1 "w /O7d(h#KJK( %]λu4\lM@Xd:rڋ1h+?Zu!s:ZU]OxW+ cϝ `Z8OS A~>Bj (N1y 3OKk>2.k>;ßV:(o_aSLJp3|)U%x\R&~:1kpj^c01pn@8B1a41m]!`Ɯh;]BY>HWgOJW*BF6,Uu+DUKWqMOIp&Oz8rt%;[U t%JkEWT&->G7!)thyCLRB6iysUEc rӘ2VOJ#Z}da/pt\A<ZI nr}UD-nAW6=#`VPM+Di Qږ8ᆳ~cEWBם -]!] NyԕYЕaU֝ᶥ#+IJ+]I4}sbWW ƲӕҕίM K+CWWƨ+D;]!JQҕa }]P(aaw<Ay Vaize3F5*R4Wh @K8wc1eh 냟\eBW4u;&+p% +lZژUFV~Q*՟#u;9!vp<3ZviPrU2ۂXKWOmz< +m ]\eSrAX PM-]]1d +̩j ]!C2l򖮎/ %DWX545t%Ztut%I +BлZMNWҘ JIDWؚ1vD߾ڝ%oc+%)#Atn ]\B Qm!CKW/CWYKkjdB bvo6n:y|3&kc- ȍ5Ukq^olevLmKlvƭZ;Rh (fzQ\ѭ.rzr\"׹aen)~x>n8ΏՕi/V/>G!c.%dQdL`< |o,UJ*m?]Rgje?cecﰭ,w< ? A+MT n;?cU; ϣOLT~ŭ2.]n~7kMwH|w7!lCb6J+ޏ-S ^9잜d)숣$[< 61C$IrkCcLE+K,}C[<57NeLO,: 7>wL.``6R xTIVeY'dA19_2HQ|Zi eV?ǜo۝1/g=Yyן2^0ԭ._z=y> %J:t+* 3;Aa`tX}EGN ,켫he=zVf!L)їމI&gY>ޒIUIZI$,}{Mmpg226 KZ&n'AeIƠ1G cP (LنL׌JǾkev/CJI9!5[ eO 5n%<4rhXqgj-yBEY T6Ve4zDr{=knX*4Hh(&'a_*YcZC I>щK"Q!qK [JR4,9!_jhzLGJHPeDIԁkJB=q"62[|hspivvJEo`FbPE  ̧chƻ y 鿻_Ӥ0YYddc(x".&`6n%?b Ԁ?^?W+񱘭gHx9a`gC 8!Bu.IL+)7 ߿'@ԷFXUSI%b^ڤjnzS4TT}\߆.8_̀q)]"ꉪپ]5Obnw_*dLɤ?G=Y.ҿU4: Pm~8qGd Vq Uٝ70r } rwv>[nWu)OS$0e[AȦfKIȻsؔ ۘbc6:mLaOPX8EEWy.z{ 'fܟ뢐rTV.8[UiG0c pT-S1 b8 vܓuXo0]9_N߾oߝRfNw?/P`Pl wnne7?=>a[d͍xLrWc,|ow9]orȭ:__wq7Ë~xDjԜ$1W6t0OBMTXQaqbxVU{(`euX{jb+>\1?r \!B#EhˆI(%Y(}99b*5.SR`ËWV_u 8pV[1e+#|v*@3*˵L)&sЦyó ZXJBYp^ti2|%wqĵ8yt6 82S; +vnW!e;Y#ٶ  }k0}sv hIҨ$ckuxitƾJԆҪs b Yohkж CBdT, T٨2*18o}7\iް>KM>qIr9J Jo "4uFp-o,i9 Ay!4b`cɔfPb4ј :;lAP,HLJtD8+2BXXGc:j`Ԑ"RQIki;stQMdi=5CBüſ x3&rŕe9w`s/]Я-A5VE}qI .x6eboy Lكπgy#n־巯ܢQ Nn^(AxUl(<=v;{iJP1ɸ+!IYo|Y'kӲO %>%s;l'hq.XLvAspQyM["g21%M9ݫICi GX$Ox2[A5@G+= )7~ϢfopqR Bӑ-{Y|Zַ܍nq< /}(~{; f,evA( :::2D Mm>;H,MʯѲ}\r@Y)kIL*]ɲj=eMvKi1.&tP XssӐo}.],8xu 4Xkk]1<}NQ+Q< <#b'6Trł̀ȦͯOx bxݡW[?>1kqSœOZߺ_~G=PoZZPkn=ضJ@JY-_ ջKׇzIR|+jDaŌ CYI2*/l+vXOOłO)3B k q A)=5:~7p))\- YJN͂,ػ8rWtlI_ $$wܷK"-FSHH-plV)IJ$GTTN@Q7}#<3Of-xx+~SZ37ݿN3>Fk5nimo/'CezM ^Sv;J?Bf)\rLgG^]/prGSv̎Flo!}tb4١BgeT M1=bDVb&@2؄!I T4*97NH\IQ9;[ _`ѹS!o&mpgqU0yt?=1qicK\n㓧^_M+=3FpY 4YS,P>h ZTCEp5VôI,Dv`Mw&_o%3s2OuuË1s;m ΃Q{HB9# #gQ9%/͜ΏX:&ƂA> ),0>8c-zakA4w]1i[/'50i5̭)WoS&"@y{Ů{<ۥb꼄9Ib *r %xaЕWB1$]4+R>ZJW#(JE'1 Y]1aɖ 2PC`-3QQx1؈tl\z~[~;љm]VE3h|@ DcW|[6JU.h%:C B06YOO0bԣ8mβDKcwԒT,%䜣2RV-4q9) F>gZ_4֌1 H($h]T K4gGritX/}mvţY/)4z'qX=57>ߡ$[|a rUa:}L|Xft=»w=?=Q[;g; Q<}̽ݝm?JӞp[_[w֛-tzfsQ<}ܲ0[wv·zr|87t3z]|VvW9l˯nB>nS]oSsu5 U 9A| yp:T~Mo?ͮieİ/ӝY՚5S2"|fCspu BWK޵7z|{EhOQ!V'AɠGgI@*(&%(FKI3It0m UUb r HCrE*oU*, !fbH/#B<}Թǣw~[U6Y__7{R-%_ IrXVZk+jc1[ccP*8i@]ɢB=%7I %=V.9WiGo^N5&fY LU "cYC,WUUi;K LroX[0F,w\_qVMԺ$m|> L 4AvY$|M]QFEe^U-ŗksK/o~c]AC.V+{_#\$_~-.jplyְܼZUoI`ŕg_`ͯL}Y<~Fo/p@տQ+Ki粌ȭp~ON:~XfHW\_3 Be>#ow1]QKY~-k,t%He^m{1VW /@'u*͏җkƠx?qKk_ߴT-"BIClW._Y}YACpq>?=Կ3\u3?ˇMEro(q{!}(8T*tntW>2_8%8k?aVʈ=؟iwn_e*1 7SwJxN `p"  3;7-yt56 U{i Y嵯%3nC)c]iK × Z};mP*D]iW汻2oSoч t֠Bq ԋ`(+OΧ`KhID4L+M6# H0QW]Qoq ӣ\lƵK|u,og@ޤ.̷xq||M]yLcmt%,&*^Eedބ脆Pb&!Y>~K§g7bv0ɡ sAm|\` \E-w _e+E8ٗo)w:geK-D|qMk_s_i 
dUy9<Hx$Ηh Kx4b{{FK[]-uQU녤~<|xbn?ZĴ~ѽv[,2~ڒy alͯ}2zrJOWOY %Ã5zu!^P ]v޾U*byr㵇 Xn_p^gWMc.V~~%_p_Q.V+ ۻ˛|oazwݯH[tО.|xLzÂO뫻7ܮ^׋uO'=m bt+@ͬ)ʅ+ Ɗ Kѕ҆0v])F̺<:$ bt;JѕҲ]WJ⬫ JK +M]ֺ])%YWI)+kB9AW@ ܱJ)ݜ]MQWAݸ2q9ssס쯏_>;[|q% :Nt0| ?}]ڢ4/P9ڥ +lHͻ8oJ$M+g&r(MaYw/M+JܘX5=MGy_wM_틛겭~yXkj ɻԏjފ56yyPQU[P ~Ŭ3RO}q^5&s:^U1+F0v])/JҲ2+Ɨt^U܊YgZkG,R)7κ*]Wc)R0_= u5 79F+t] L#FtE[60uFÀC*FW+]63v])u&+o3R8bt)+zƮ+ܘu5]aJҕr+M A묌]WJu& ѯ dWJˣוRF3j &IJ*#˙jW\Ji箔R)Jm,cM'nS1rf♍6~7~hgZō:Q\Z.8-?glܙ0cE#au7w+̖%5&-kӲs\ac|_6=ɋWᑬ'=:9wo}bS\cc@`#Q?yㆣ'G7}z|^'?9wّa0D"QN[f.;_ZnPM^241//}40ߛVmȶk-ɛv"8'G֑/b.f@q2]ZTJ 8] c0 b˙T\WGE)1UBI׎MƔ+ŵ,$VZ7Ӭ/FWcqG8A{08 em83;QƑ=g]=#TR1R\Jie ̺ b]AR`JqJѕkoa1̺KBAw m)RZqc(YWyG 9bt)+;J)<[b9]+d3j > 8~GJW*fJi]WJ)IJnld:JnefIgپMtΏzo d6'OF7Ѐ0%K(P1pdJ{Qv] f0cO-ԝ6] LU0Uo9l>ip]).Rtz0YWԕ+?`KJiiRJu5E]༘t5qcוW6J)u5A]ÌcIApt+%3v])VqϺ, 3EKZ`̈+n,&-͋Ͽ]>61 DȺG~0ceWC(1#{pB4UoA_+Tuf]MPW| ]s9ٕbtZfRJf]MPW>XgMAR` p)RZ?J)yuEv׈Pk^ pVW_rsw{u}W(C1Q`}ӓVF?].8:o/ J]`-e}OO~oW?|߻WbufqP_mG85Cų|~m;@Wvމ|}wóx l)xy- Tr{/UrP ?o~dUhZJyg/Km_޼YxS:e8 n:]|RFC;|5fk{-WSn)Z||W~\ br5=c臖JLs5B" "̄)n*f& Ɏ:Rz3j Hq?e}u}1RJUp0R])%YWS;!|&,pR=f:/.9[c۫y>pקOBu/ _qj_??V޻')mǠ쐾D..궪/h^=Z1?>|.7Wc)`D~џ!ޑ!+Us~z˷YKc?ؿG|>|{]!_D7yiJAum~UrNy^9guʕ#餩w,]A|[m]mC(xSg[`]ʆVHo^ݜ_5kwhWׯOsUտ-W6m6$ۅ6QhTwm5ʰ' m9 Q]~i\]d%{Ӧ:ZM$α5wǁ.6Tc uge[a[v'|6RCӶsgZIsljc8rUgjk˔\ m$8pHژs3"ԉZlʹE-mPepm]k!.lEъO@+wHR17m|kMulȤ8GJ-U٘.%IGWm!>FcF5vhFq׍뺶FFGjL5uUw{ |$-᥾zRnm#ڧcF{6>3+'MEtW9[ 1hGbFb_]'kR+p΁wFr!bl) !GJ@#LGJEus}yQ5S6Vu {i!T#kX2kJMGqI *vV亣ڪr-BdH=>VhlCI;nBN"v*[eOM%8ut~v-W16ڤ{]L|HXVC Q;z/ ,Ch]ۈ$MhOVWd}x!Jl2$g:Ur^65֠V1Xǀ(2j.(5sBzGhmFֶC`L֙**2 J~0 KFbuȢRFFg3r `kU5Im}]y{!(L*sjQxymǶBhmuƧ`6Ќ EKh uu-ʳnF( /, 1*A>g+N1mWF<6"v "F8z-2(bnqVU]f,>RL ۡ- 7k`*%C %0!k d!a!+HbEu+곱|L_M,3i}X9Йl^@?lFsj !J-Pۖ7#UCqn0A1)[[(C{HXDAYJLd\G4udCG] ƙEֵRЭhH8hC1I?K0 eMH I (AQa{=O _7A+EB1V SQ5]tfN}VXj Z]ioG+ ~̳2ʗc$ 0@URCn.)cSGSfȀ*_fFWAE= eC}!RAPS:m] 9RV~4BPA}Ysi#=K`+/UV!$$[%\[CD A .FZ6$zCyp*(_o3VK|ηm L:>a/{t QLKQD8'2&T'2vXNT[Y XFUja;Llh[gCzV0^.sUTdz|gu L9/t>0T(36 J߭;@ڕ^BAhPVfac Rٵ n;X|E `mkԒ"BiDH2ӊU 'ej 9-~OE# 0|IE^F=[ix3BElUuXMwwʰDJT٫`XwVTxf[M Dv ,};X*r'^/𓹊hOY.fZ`I%>bE\ n,zKb v{v/v @nG%\-./0&#Ttg;5)"Pg 2DWy(%w%lBpAtBPX`yP AB>JB ӘF8K]M`uX^F_?`%Om~q0hRhDdz0å^4R1{Õt5N3\lv6=>: 'r#5L.fҕfawYx֊pVBVo-u6xGCx6菊~)&vy2eIw䱑x > ث\!۟GZ ([և>D}(L#W^Y vН(rٯ/R흹Z1: Mmt Hv1fzaԱP z , d=$!YzHCd=$!YzHCd=$!YzHCd=$!YzHCd=$!YzHCd=$!YzHCK! _\S=$SP{S/:s@)}P,b Xpł+\W,b Xpł+\W,b Xpł+\W,b Xpł+\W,b Xpł+\W,b Xpł+\W_TpJJ\Y:7+ =L'ǂ+\W,b Xpł+\W,b Xpł+\W,b Xpł+\W,b Xpł+\W,b Xpł+\W,b XpeWNIp w&B6%֨{/ԙWRpW,b Xpł+\W,b Xpł+\W,b Xpł+\W,b Xpł+\W,b Xpł+\W,b Xpł+\j O*z18x>^P75 ‡߹2 Z A ba7> h04aA`dOrV~ * ]Z%;]JrևHW!8=+j+tBWXךz,]k;tu3c!\wanR{EWJ\eW]`3BW6NWLWh{DW齡+վ ӕ&"C+#]pfo:/tEhC{ʈ bzte2#k7tEpV{"1]=@XGtJV;5z_:u"+'kOu1h޵}0&o{<1 //5?D}0PGC˓e_'0OW5n_GMCGL3;?uFeuz1irZgZS"(yܤQv=8egV9+ [ܚo1]664/ :j1 :6#gO/Ǚf0}=<;ëjX_ݿ݊+=ݣP9SU5FgMW@$ 7͓|IŹeUCKk4]cM)`I4eTrj[,fDViO©(U\cjdUs.v.wZ7t9'gȶ/^n6@!L񆤧̇M潭nOT%gtDx']r-\ҭm*Q ZkjandQwXoǴm{XZO/l5sY`v_lsZ<Hz#9A<^wC7Y۷,/62iF|:ySWw=埏:-6͞o7z/ՃO[k,ET}Zs9#BԾ"](*:M"QNBBcz~:ǥrHM,!QQKjjc45 7JUb3.Q߂swu2YbreK1l~u6mgUp`4qj2>mhl4=\^iǘk.'^}u5߇t/iTb/-?ۋ٫7?v8F×czl76G "0b[KAnm؏t#f6)mIOAEr9uΙ9}U%numn뫦"VTѥ.ȼNj^r1I7 8r@//jmaoߞɳNT>>yӷ8jS8 ?o M7=WilZ{s.{v-~|$B H_.~;. 
y/ONFuQ*漎eR@Ƀ `c$Eyqr" ,BzA7 +j{Ym e:b^嶹zhF99U [) `*ь A\`MCH$w3pܮ{ۗrvAb-w!>g#C#0Cf!xvs6?&|)Sƀݳ#h{vƘ 7$dsJkUr/l.ċ/gg뿖m~n-&x6#a-涆¨q2vQ\wAPkޞ@[5b;"AZBǏC] 8MGWX+KN .{ WtJrr"r,WBVkFjtl\Vy6ֶ:qI$PܜT/FuRMQt>uc+tJ7f23o滃=-f#5CYMϚ_)޴y+Aclύ_a:zx7WwR,)BC&WBcٍx8t M?HHdkPTQV5`=[OeP f#Q5-ԛ:,2DZ\fINl'LbkyE50 ) vg}ȥ]>%!R:ϣw3|hB2BK1Mb1' g[:Y^GjN4}qk{䛾4)OE+^ñ>+nOo&=%6 A8c9z726FƓ /7ز+y.^o??PwVl*퐭!VYNF(5 nΐcϋºl=Lh9ェ!d"U$XAksXP\HSc;0I֡\գd7q*:7m8O&NOBW790{3 de`ߞ|!.;xS5XB9[BhSA?gcPg80*o]$ou7\)O췍xXk(vـoG/@W% 4 ; C,^~H Qj532z1G"c'cٓ#c"cٓ cQd4Z@g R!,Am&gmvjw勅,whGAvi_yXnT=Tv7J=}j| ft }!a5;/t QCy)Ra|Ɂ3B̏2 d}f*:[*rbϤVUA@PBSl2FҪhcKvNHc1et!*1(<ά@!4^7vs=(ZgR j>V-P@A1X}  R[XE_o!)iOe&&HE\ %PL2 _,S&"6-7 ǽ+DhXZl2ضWJFbE0a#;4|I)֫SmBFL֡{5YAAɲP6#ۂ7Y%5 ؉pc5W h-`|px姉>_7Ke .95xke#6q-𤥅\q|37:wfY9/(;P yoP)0kM!XfAL%;Z|\O;,dЉV.9'xUXņb|1ޭ޺'n^ΠagT`_ϨjJnwo"_\뛯קz BV?[$ZMgrD{sy[t4Evͻ)o,6qȑ15*A)qES}CW顜%&ۯg B3lX~=fdNfJ]Eچ/]gnize;}Xw2j"E\Λފqq+BGh`}Vgr(P颁U?.;_t.f- h<=O"sxu1Ȭ~" zvkEp׎1kRlΗMMv-T,C?Хtulo!w6kUNp=t콺6r{aP9{g^AM\Os19șj?'?=#?8s|^_x=3ž˟>1rxpzGhnjEݟK_#A+ЂcEc4uV[lh:ZE ^]s96HCfcprsc# ]8xAyDG$SRQF)Go -5{Іl&GYZuqB(& :o`L$Bq&:_W!cu&TmR0 #X@JĪRhZ7qM3fzi^Ӄvo>\}~s{wж~/VV:@?o^ j@-"s.*9+ۚL> G %n,CХ Z]Ny2?_绳})MpS{wmg, uT Kals"G,:-G۔ Jc >Mq?@55J_ɵX]lo֎Jc{Xk~zlIF0"0f,ąI7iRz56 XLGα@ZN,h73u()j-<8tiA mB?i'jNh+՜FU4*>qB}ɭZ}0@svZ Sڐ1vfM?}m;6YtY_5YbBk49BWEeb2*h@M}+nQ*n[V SQA鬨yZQ]sx<}Nm ~DP71˛E8m&cKFx~ZǛ"t{IE`.^ЊqT(f)s084D aJTsneVtּ=RM25 ,jA\XH٣\7v?5d3wd%"ـ)ڪVX.:y C2lifӆXv R5.K<_]8O[|}E\SfǡV;vq#YO 샀d5z^ +ږ xI&VЍ[d4$P;(X +A(3UOS-%vy.[o3b͏C-"t0Z"BydWA{VI\@lɹZ$Xl`v R/ʅY.S Q[fg2-C)I b+"vgY^u8qJnP:4.+jL1moJE x`_})36[V\Gx x*8>d|m`:z?T]$Jd}VT|\W98| `5PXpmI>%4\{wڽ|I֥H&e[N|IDZHE fzf{{VK(f`2_FG풔Mf8}xCZL |nUZ/OiƑ)D9h#$.@W& 0 @Lx'#X[RcF1ZleF͝QFGFW1XJBQEھCi T 8MeNl:Ea6fL%huא.hW3DR=e~bƣamЮd43ʘ9BS8vqHŌHs^Cxa2%8O[%.n޿icA3sӧzAZPo"лjD[樑H $@\쨾5;o EѢf7?05] @$/׍w/*_9g )%/qaЈR: R a#uT$ߞ}Z,qW7}67Nٿ}o>%_3٥\Ҕ!9FD<\0e7n Uҹeqn-)Gv+4A >A/3çz- R rP]鯬+Zy B-37ŷM`)?X[, qU{]2i,쯁s+Y_XcT*n[θR损?iW纴]?Z_2?$\qءM.?徚>y8M<뺻?geoM_ez& &wEzҧÄ.r״M3o6|!iSSfbE!G5&ĘTdHi7'scKpD8lm oGdy.?lqIU(*w@D\0pV+ j5sLޡYXbt+C@uT'ĵ7,Pj=z vmf[΃BUeIj_1DM,w5) 6UfW *!Frr)X`(AQQ>eΈh(6˽XAYWZ1G >oZk(y4 f)sɹC ^S G;iD'Pcz0.Kj 7NnKrcf(J)zq~(h(PE'{3 v8hFE/s $Y8A3OYWT]]QGrRpdpɯnGj6\h[.݁-w,F@.. 
+N.gGc e & ¼&BP5; jUTsc%bDƽ2v۞Tw0M(yPEG[}>h=7r0?{Mxd4Ue@q *cAWYqmε^cW:{U}F`RGSJmΌ6>Dcrw%1s׮sWŻlbWsܼ_fhJL_!9a◜^"?ٯ|* US;w 03vNLIg_>o:%f4T-S.M$(yDiBq)`Lɰ.'®;JPjޱȮ4bW `NO]%p>vUG/]uaWdí'sճmZ\̦UyVPrL]-ծ[Eb ާm'exD9 ͨHh /KRTYlpeWu~bFε$g0r%A&Z# Qz>g^?bO8}ߥ,7o濿nM&Dp˳MfKEaiflMJd*LDk0D c(ΑQ奠!Ӝp!}KETq3 9 Ƙcx4-Dh/9v|%gS4aw4Ki6.G2uDK^\62deyBG1F: lp6ߜ xDŸ.TR iR`*XddrE#jܵC5.}lT <] ޭ5,H %mgy&s TT~qQn-,R^܎ug!KeA<%6ڠ봘dIxdp;wdpijK Jb "b"FDF4MG EέRȰ, Ƹ#aREb"D4-#z8ti1ZQȓI%ɦui,^֕XD1 DoBP,;hxWB*ù2G`ee6XBaM*`4XGTaXK"2]H%1QMKc5_K@tU"Fr#u1XUYSN hZcxtAlXQGcGDch'=h_6d\a ́dĚiكxԽo7ant̺- 8 mZDd&#QH@:o;cƌZFLaFs+$ܲE5p6*]O8.RtX QZ*!`fh FU$(H +*&tEKAw^ ﲟ`؃C']6e]1(Fnr@uE:ʛkqܒ ߏWQP̤"NC4%.~~νX>&~~a.7S~TQ0A5TF.R{ǐ‡IN6*ül.r%-E#,t Usϓ9H6 u~|L[G%9 D°*Z:6G wXGemUƣ2Ar@cS^ebƒooW!*i#J6B #%m JЊn0CԚ]^o[?.&c(9.ZUr5N۬vEիYv-shغyQϳ;y7yj| +]myg֫Ot\|1pf4E'M75o5V])>(i9=\ y;Lgi{&KHlQfFAV|xk!LF-j#~#sAY4yiAv8n[NԹp'Ʒ,y$aJ[ɩ\J]0CC4Zbc^ 0h8ˆ8Uxd!R&R/5eDDL ` Xy@*c"-Zgޑ5.ӝnq7 8/&5'K#qVK9,޾x+/ç G)F{bd1&zל(l@&1rAJi(OYJT=IpWb3it_f:EtQg)5Nw}&ݕS]oGWm7ԯꇁ|z!{1~TK%R$>D=Dc$6MtWlwuh'S\T\[|̪5R9*pq+P"ʍ5kH mo~;JG 8QBLh_L' .$di~*Ǹ52qwrP>ޓ>-}wAfܸ/ӶZsjW^H *i\-^KEB1^PuTn|1?mnf3i.?*X6;tɈ5RNY}i QأHk玡*0SJ T%]2ڵ0> iNCPOk"a} tdӁj刼)sh8}m;ρ8Cp$@!h(o33ؠP$^t^!*q9& ."d+3)'"˩im))l,yXPQYxq; HzlS5>+d?J[w#/Q/i***oH#Kp<i+B9Re#wJ :E.B0ښR:mmʒdF-]c*1/Ps-='gX UUaa5 ue,= oM/f34aZjɟo GlL't)SJ eȆ2(ipRɔ);#32׭neV4 I0&jJd&ɶce*smk0b(ոc[6QkkۑYMʇN ,R~UfR"1[3Y ɸ U06>^6D!WdEG&!#ed.jR R"d^xX51Ƕh+#Gĵ:5y̥A-#U'e !K(HNWEDg9cd/Hp2%rB 5jn$ɒ&EzU,9ۑ.sj)^3د{?fCF,9V'DQH1kB`bR*v.vS'Sq: +7Nϧ[Џnﯺ`1ur.~:YG箹(a4<ZkE9QlzU*4*i%Z7Uu_3{M+}bpYaE90^Ʒ%JsZv^>Q;ufE$Q[=q~9Fl0$fv2_bKs 6p2v9m5oLW+|]>_>&nmjm\"$E|܌s6G*/]%o+ G]M_=H7%޹}&RuƩ$WkZG23c J;^rO >hЂTqO,~y@*=\l䐽fZ '0 $##LNqƧ!$Ѓ^+qӺdvW5x}[{UE2J:=> ef請bm pb L2K'| yлɃV[>g_Qw}EWe\Zk){)WR+g;R,og@,]7yu|E]] >NΆ dȄJNh-Sv"7oٟ+>7ĀW~a+br?a1rXy3Z8q$F }@3`xOF \O!4 *~npж>Pܒ!P2Lpz$h)~2pzu0kZ+]>2#s:x?I$7]Ѽ Qy\8yT\Zc_҃|%X3^ } "}i޼9~H0;EC.դ8t3wW_.i8)M/n% e> nZ[Տ+ o/'/ޭ*fCKbQ>V]q Fo98N15#)aD0aV#Ѹŧ,b'cφ5/Vu]vki't=TDžoX]ryP`xuiUlC56.OGGǷG??} {/>~xK;ΟhiS8$LO#=;C 4bhi&CCͷO&㚚1]!0.V4$Ec{QZwME*p=Jyjk)S8ĝ*5ųBd#A{I O޴/G7GH|+bsVDӑIj)'vR$׮|3iW%y!i㝏o5|A_5vBq6C[d{w̽cPWu-eqvxx+Pq i1!_wzG f ;_v`@İ,9,b$K%+eKN$UV%#3@\~ec/W>/:;wpc{؞!e>Zx9zTD<@S PIheʨs*IPTgwB:%*q-[( cZ -Vlm"\x1lE_ Uc 钳DGs[#"j QPnx|ۋ,y6ӧCǓ L]4~&8O}I G bäTѧ͋V2$IVXTYޭĉoۂW;l9-A( ?-i!bsxl1M"-QV)E e^~wig~n -^AWNFN ~AG~ۥ2R gnY䮪dᲤ́__y@kь9oG" `m5Љ,H)Ci207g!8ޔ~kiy>T~BQ^΁hצn!2,~]+R8-tEh0 /0#Z]2?6n1kv_o'_>]|?V添/׊GÕv>r g2 Nm~y3+#l!}4i`8R*@,4C$`%X a~YCC z*_zj_z2_yjKhC !J _ s[=$NJ>MJq!Q:eBѹ \g@E"Ykc!+6LQ:%_9ZQ'_Gx3AǀЧUMt?|M#O3љ @x|#P(d';i^dI{Itz 9Rp 1_!QNce_YP%@LYv\ {ց&UQZ4Z dX98ƤS* , R\>!:Ix'SA8;fn'dkin&H͜Ns$})'5,XYbՇWlsz{a5c_i 8_mlJBNv:Nb*] ,Nm ]ۛOD,˦ &a@EˑdԝΥ쭊#ÚIp;6d{46dpxƢ(hLMd{b1| ]g~mAod}Ht:.kBN⵿|x[H2YqoQhq֌?hվ'dCFu˖%s:y;[{˖න[ ۖ?7|ˊeUz4|n7F cߺn>?^ҨBoH_X;f,]m|w*kGLt~ng!6-5Z^sfS,M%86mx#]^=rrxteCW6Ѻc}/|wvxʕd<62I!摾Sݭ8+A{ՃKa=}1 ehl e)ayHZ鐓N מYQ[aQBHRdod^% w6Ӱ-``,ۮcZ8DC~ G{o |I6}`)J+Z,(;dphNǸV9kdƞ@:#soO7IO`SzDl#6Y7kO|V)vX{LPOhRv8;h>v7$Q37NԀxghE:I8a'7!Nho4ℳrsǒӄT v(U%] 㳐5>Nx8푅T)y8C\&WH$eþ QےWLFk 5Pp|aD^ߴ\/jgVagVQZ 0!se Xw I."dKXkBmMF!b4hU }²ij焸|&%U[3Vg^jX.62Յ.Խ.<.\K:da;c4Άr:{;Q,B*) ȜXI<$̼ uۭ@{!)ڔD A&mǬ\sLpZf4o^b{?:_V=a6N^c+#QZ9!Д # !Q@rC`µ B劍[`ha`&D<&* ``Bh{!0coRJT7,ڳ޶0wN;y%4M]WSB*ax;zRjc+amЮeL{lTY8($bFy9/ סf?]zjEo y#dJ$o=HXn!.#ENRAY}dJ"OS5/Jwt8N/S`Dw/8uQ#5 DIL5h  J}cRXoFXU]UhBqHcD5muN:|NdCR #Bvq"P}2jP(Qhj 0ᨣ0>oTu6rW'mַ7ʆa49>M5}Χ.'z)E0"2);A .5Epd^~K7utG~p蠂gᕀ^\nfA-faWU3Jg߲8.?!K;&[f4 7#KZc֦OqU$ŝ" gUDgS75fXrTRU|&Zճ>/?6ҮzҦU !3:1`uMq3xbf] eoyygz(nkݗȃ$Ȥ]Us"OYը^ʤd|C1Q&K_ޛB.|m \'mVo\,iAh˱X>3Y5RZIxd|u"0\ _W N6~i`W?77r{> 
."Ʉl*3GEK{9Zl6ļ(n"}`'$OLjuDd !kAiX5VPqu'!,`8XgnDռOS!{-Hu$|Qi3f~idQJW3Q?&>73J kqrz=bZiŖc*' qcSS1[;da saީ:^;I[[g&M(?Hcm7p$XWZ$zJ8ոǻI t./WB58!#ٿ1%`{9!ytcE$q1-^9T4/J1\?8gK75mACe^J R9Q3pf=c6FvDGJL|DOC[K1N]f QR9ՊJr(;qVBIc\n;1HĔhm8w R%jPCчuڂhD5( / /mWsL *4P1_]Zg!>G$xƻ`5j5#m TF TqrLPDK*R :&eLCnɕI'\zv6-߹g&m_z'@ {n⋷!(9Y|~\|\/Y)hBxu7r1 ILoٶL\ـ7J4Bvê"#g/ ڎw"u .1!ѥ.pK\h@n(CH)RSzlГD&Y~,TEs6ؚM C@3I>oB<-|ʨ.5Ŧefkذ~E9H=u7ALF%o{'nԀW8lʌҕdB3fOE O"ԛ` \|fP֢z>| o.TgQ_/?,(㚥[_OGh(xTrTGTGS,|\dGqzPDmDjj.݌))\oDF5DvZh@Q8nY&!.h),{W̛i*'pH'n͸V6u*Ep迼,ϛeA-L'1[6^N;iRjmvz^Z~hEoZz<|fN6>ݻ {X˧YpV/:hOiiJR//9,Ҝ Tr\m@^AWEdXkC+z@9")TJ)lXjM{r4gz4`WF$[p rh{MpuFߤ_ DÏE&Bā`"󙸜2^[PwDY9vT4'g6zINϨQrĤ4>uiUn|wj>W7@jbQ#)S8i ^[N3eYo^>U̢j+ޣ\J$yEٌns11 hr*}_Gq@p1?C!tXfN6Tt]]_. /&k'BN*!k0*3χx$ $Jzd^Y'mE;ޖ"*%8O;5k%g觕ES]u {q" cb,JoO?[&Mx%;AF ZQ;shè(ٗMY<˻E}ct]$Q{ۦmds{U)v`4]7˻;"X~sqSl}Hkh҃TU]IAWѩK'K }moOO/:}N"X%1 ck#At'y.TOUiLhaJAV6mDLQ"NWLP,H_&L ',u~~=MVˈ# Zxh|#4) ‰`!|]E%,IqYJ -Ÿ)!J霦6KVG~²TyCslZ. zsןŇ PyI%8pEoXt8lvAm$H CfWb ZmRq2 --;(79Q,VsĜSdt)lp=58 C7q@T(qIy_cTr@3d݇"gG qs"ʾ*;bܶ!Lemהw9r~kV! y4"]J^Łjf_&dhV uvgxw%]]Qʀ W4 zF tw{eoe,5bK ^ҹ|*JN׸nxmon7r^;2p@?;_4p˴߇{1Y "&*D׭C`;s*ksrIq(!KF+arS_mk\?blg;Tv\ Zfmk=yHnľ_粤j,g5JE鹔igDƴp&M,P;QN!D}iq3ZCCaw2r:8P99DMдR `U:PY(٧O㗀hgP ۫+ y>?t?`N; 7oOլAn7-0Ӽr6?sv)) 9XKvsXs&WAݳ]$Nr$N1>;gyPL >q.(*-sD)*^hT$&vn}`v v}qeqXXq1aQ˃̅a\;_gtxЊ ('PWkt)U67FJk+ߥMV+7ǵQZN,ʞZבX!A~Lwn1FZT݊Euz}Q!݌DRbLQԅRO]dN0vLIeV_E<g0[~xTXt2٩|Ǭ 7)<2r5o ee^X1UPt 뀐I)ki E>D~1s"֤Dϡ_E2Q'}NY^HF¢st(MM,h464%f[+SZ7б6%N;0 RVC$I$DWLG͸\{ :E:D^+<,dAtz~z^hdgg+&rbDxrJhA+uD"1ShW(h R}݁''8H`4e[mH`Irc sn',#I ȴ;%LeNRDhh/l6'Ȟiۀ%D> Α@:PFy-ϽGA-ctHPS*fqpm@v EP(8ytp-Z-.tt^l]K;Bv4zO:Gs͕\,IVߺxҷ_~zvggRǎ?|HG0- @DG>u7Wh~Sto/  l%I0Ix#PP0HHe+p[H:N"bQ㷓⧃NÙNr+ xXhzJ~j')!DL(;6QmT씢%Ơ7T9J81y 82R/Ra ґ#JNm4OiA0M!8wΉy<㯧 oBBަͤSQ&Pk29C,g1,z*\@Mфdhd?u7VJutX #"HN|Y52Vs"6({!V5Ó7$Ƙк`5._!Fוo*5¦-VJ]D]c( (vwxBfeMAvj9=WA/;zܱBǑ⪢WdR^e9W0EKG-L'# ыBT&-im|[Le.Dn̪qU_8ZrB}IfFW`vq{sy/?~w|Hjz4g(R.VR,v55 6rܸ HdċPnHJɂc"80 h@:;gb&5Fus"jTl ֭{k8չVfRz ޠZɹM~eL(E讼KXrňQ`tGHcRG6AZQ50`;LKN2tJP8H~RhJ,Aq>5%F匾f(Tg7Nk`%mAЗd5?[Pc{zy㵡756z";jwQ`P͕GM:1SmT!@2]4o/7aEۣSWU<;ƫ옇J>UJ n/gs[=6w6.O$և~.7V=J 1S`0~KFtvtZi4g8"T+dL-uWT+U触vjWQz;T鵠~RT1l^J ոGl&@O^L^JR% t!~:砹S-lPFiv$uY~:/5sDzU>K[z~|uv/?Z1-d: GhEa:8<n޵fg{e}̇OٞpYÏt1)1Ҁ8P'7?/9~"Gǣn0;=Ɉ .TNm<^5N'F@&)̥\%r*%-OИq'e'R"4ĢKQ Q(D‚t*jJb@ȤhYN*p1PyuTށIyAyMl@tm*(Ϸ[ .ކ "hWIp)HF/QT*pbH"&Ҥ|e TE RrDo_E{S%{( l$үjE@.xjM`eRn :HUJJv9~*D%Ʊi ,zڴig\iӮWXHEUw %c,%jS5I|#ۅ InJnW0 Ov,H.tya@WdOC9:}wyj&(@a`C!RA Lp@F#DHƈ6 FJmlTe _A`v&Hf]"Ep̀!鐓ɋ7JF!1GYr4T40- [ΨI ,'&9b~+-\yﵡ٦y8brhf6MUN&՜VxӧZaZ_hFB;ʶ=b`h:3w |k/e10b*)[3>{q2)*읝"hgNAv(hr{\m=0jT |+XO Pj/c3[֠&rTQ#QE6 !YMrrml:8V#4S2,lά2UIJ /iO_S`k-n!|298\HƧ;#)x2ֈІAQ'.J):K*;C$vF%v _ktF[LL7*+57+bt_,(,_k_ib5[s(55V wBgrEک"+,f_¦:~e4 Fr*HYRf.oG}8|a=O 1j4"u2p- )&s.H( O5M(=XE;דZǯ0 ٕ ,:ˬ@7|rB|8:(Q6Nڱܫ Q6e(j,9ΞUfYᥙ,_Hc|(3+V'ZIGQO^!jTO>ՓO0̜P@q4  jdɂ]#9 04غe{T~LWXû^L%&Ӥwh u42c.Us0̐TFY!V qUT xb lX 29Uyw2W %fy r~[Qm_ҟw繞J*.➪^ߢo3ۮTseFML5BmV[a:-SMqL:IAR\#Nobqɗx r㟚}z}Gܹן2\·Ys| `op3߀;mN?= gY8g9[DϣYO}_t{{XşQDԖ4XH{Ϋw*Z\NMy˓`"GƵ{;xx4+dcgԝCKż$P^ۗ <[blAϢ@[{`bX#ࢶu8DLt"ʵo{Y("quj,ua޹bM:H^Tjtܳ:5lDyAlu4!>WQF_u˻~-շ(2/騕JW-Nlc$R TF/ʔaPW9vmLP3{_^5ze{^UD=5QetHr5Zk[H*[+us|GV;Bq3Pc- o]ah{%&<Ouo#>QqtL[yꘔ^C4@5 D[Lt@c/ DAcWwQ{ZcXQ L:\*#6_\ݾ!C}ۨ g_Ƃmd:){cd8~̗NSZ`QTSڡD :J\G%t&Q.pz}&7wa]G=@dDo7n̚ g'':洞[5W#/*?5Lû3ݹ\,ƑeZKŌ#(pY8E}Y}f #2wud H!,q):5AP6dj)k):q̼2NoE?aL:Qʽؗ'Rp⮋y*g^EMhtQ`4t=Je[mۆp뛖Zj43cq3==yYق򰻩ftԫmܒ .'yV/6xv{`4P6ݹȢ`)CfV覡-"Wۨ 7.t{RۓxJ_IJS"d4D Mz]k FfOOWMP5A2Z{*5Hv0+C塶 mfوxUYf{fʥ)ƛQAuE<֚"*V+tA{ [Ӵ` &95jg=F9+C4$dQ@r%,Mۡ aX-1+ϭΠCQh-(g## й,ȠȜ=BoLv5Cl fO(rʓd!z}汦 Y}\f Zԩɞޖ}Q{JL0{o?]f8LXO: 
Am7}̢DEdQ2ĩfQjeoYxESW4٩{Dkvt`I::c3BmԤ=<ԞU{jSo tԫ{U5>NҼX NT"{ }"n{ w<:֢Q#}q6`22xjMv< EB=[DS޵J*S|>QGb?6K`>Y:$^h5iE3@Qti/6EՋ @;]x4^{s_x˷puڿm~͝E߮Sxբ;Zďm]|ʈ.bNYtK+kqmE +kG&?ݥ9+3W'cp5?yVf([ r>Etg8e o!t [۔E]he.Ҟul1"Qq7ŽqMxO4Ck8c,߭w)vyßm/ގFgJsIw/Wƫ_>,Eqś/j-3a3A,JQZ??h7\(%a+? oons9&WߊWu#5oD 8}{!KH$9{]w^F! (_ :oQ-]lū?|ҧg:q;>p7cx}7Yi$5DI.jcKH?zNu OjxRÓDKTpQ"vpES48]N"7v1)ty1ʇ۽Uoب^uMh^)&|[jBu.Gy+7ph/]U<)(v~œ b7fW]ab*MP-Hx:('qwGȱhM%"}FbrBC*{$0F)3P{ SǾH-jl=kOY[ HuΆ _F_;bB;"7"īD~n#<85>kDhxQ98;=mԉYlq}YYFSwO?\QjY;y.6} 4\ixBl"v)7Ū -i}m^ˤ`qLY|w6*tH>vrp] \+z/\xt޹`: qqOߐH!KQ ^Mjs)`M](HEP:l6ٳP} jj,Zr6CZ/Z-_,C #kXbYbTԒqju. A@Ea5j:yߣ'ڙU׬f\3LǛUTYjD, 0ptͪ5jUWuP˦lx{ OXK@*@cZmm&K"F)vrLsǻKpd;[S UuF܀q srchkuUW&}=8pX={XP:50=X;1;49=M}cJGFayγ<ϳ'#q&2Z/Mp g4Ynvf-#AM2tj3%],n5¹zZ)Vջ<ï '^ER8 T:BPǜԊTu\qUq(sEP[m(' ZEZTcQ\rcӇ"\8$pP{p%j5jU#Ulkn-pP/5S֨UlJL+tc{١ď &0@,U,S^<2J<@w,6Zܔkvy8+Km1cvڱvE?n!%u13tÿo(IX ݛ<#۽yN7yn0/OAE|8p-?O?ȧ}%dgkϻvDp)^o&m8 `mqsnE?$g*~stSͳ]Gsns!ii8[|#4Y~4zyۨv#S^Kl6Fx+XD3%7riBƯ2kC\U<ԴT'1pX'wk=oPǃm\O.N}Mkҷ}7[@ka\;gN/]3֖џZǪω+KaK= 9_}9MnɤДq}? 8o\砦 (/%רm|vBRvخ¦Y&jO\<\Zpiqԫ{=ԂY-Ղ٤"J.bNYtK+Ms$ޖ+x$3 w6۴Dͫ`6ꁰ|qκf [.5.,cq~,wk&qFgɖ6_ U!c)SIbWg.Uݰ-\b4I)JdI%OJ|:2 )Kg]YQ?vq5@r}=q>58lp4,NB62<UÏ~E+K|D]$S fbg߬wn]|YDB?Glι6htD.Z׬)ʀ^\*9e\.xRf :@| vb^r:^{AI"<,AO_}O;̨aMo&? |3L-F%qr׋+}Z]ے}80E,jĞ2Pe'w Tq?;P3ՇP1C8Ј4o`UTZn*AD"잢jMq?U $h$!JN_IƳ*dj#l? ~*]Zĝ*=o=+VUXQq]/p pzr!*I^.fpٱDd~"3g@xhʇB2&s i)<_AG–rJ>d4 V k؁Sr*O9 ijj{Q˾H5 eEH`aw|}gIwTگ;TZs _@ Ƽ}닄M=pIi4@ LQPi֛f7h`fi .q'L*PcD iV jNd'L3ʡ֢7zӬi6c5klUZ` ,lf Q4;iceT'eT L>etQZx_Dw#L\ j)&၉.5F4Ez_E*"\ _Ԛnd Tlmָo|}"y9"Յ0qUf0ͪB%iv?Pc0qoY-ӌ6iӬ*Əg6jyMcfJ/#0ͪ@g ӬDNfj jg{Ӭ7fa$ѫgvU,[ݝJ 4xlҸzDzJX&VZbXJV2%-(V$:Pt*ß{U3Y jRW%}zO"h"iVj@0hbOƊ ̎oT`:M9)(ZKA(Us )3?$+5!XR#l%,ꄟ7^şwZv)h6Єѫ 34Lb.ƚ&8`k[VC!#S5uZV[uԘE¡؆ZQ?~Oڸ! |0<mS~njP׵zVg8PePRWg:;:#BEFZGUuX۪5'oU1UA`h6J:5ǨjP ;e97>#Z3#{Ƹϓu##Z jEn2そn9\2-Yf<0󪯺w"lUZ* ˬݸPn-W3*}QA`@@F8fL 'ʔs*a]*L"1Ts rKGBp  K4jacK8n|OV* HiQޛĖgKфl9W0$0RaELdT(k Q#bD`g 8 kCnPfA'S<[@I%j < r;|SZck5$j3՝֪XjEP>jJk14)XC} 5x7QԧT|p{O7֞zH~ վm߶Jօ @C&#[|Wj4ƹW >H๗A-ϽϽ:41˜n̦'[w -C'nffk/?LraF7%qYg(s=f,]l&f!Y"7h,AxhC*wbGP$oKf=d!3eW^{2~Ў_\E#܇h>su5BR?2OY~S>e<]|twg3 Fl9QAwX׳g4yq͆Mlp4cfqB`?2~<,_yX\PD\$SMF盉}޹uf D9ye4A4s.m J3مYX뚕2C׋_e4;硌KeO!_<σB{AL˱[Nk~^$"! }O; ?2nܯL(iT+~ҧ%-kY yܧsi SϢCyhGBM;TI| }iI_Zrzm8,1HaJPk[r:OjJ}SS@GJp9- Z7ĝ bA:1 ďxO7;{=t5HFqn{~yosP-'/}/LvC 7]? xy*?l r'ܪ y&N8O*|uE XJ0JSRN"/wREz & %hD9}ԖbdA&$SQeHl$LBhp4űKSK8HnSH}K%>@?f]ߢ (Hv:=;h0Dm?/6k|pI1bbXQlvyQry7߳? dO6 Zm(Sn05VZ(FߖKNm/@CAqYW?I?e|L`ljK~z 6FC"yTpQ`{6nyW'ax>agRTI}˘OQ/0ZM? x~0b>pc>pY/H+*acֹkfq‚2qq }"jlWO|/ *.t_"(.VWuf~ﶫ8ƶYcJX; !ѝ.up$NKbˉf Mcƍc$,Y!Mc M0^Z Go!^ƨ⧇ҦRR.m˳ڐC(A_@wqywv"k4&@3 cbڞoRLp\*C-kWW}C5J_/~~ëU5<BkmUe(OIiTGhJ[. F xg/[%s/ҍi1 Nϣ뾡M׸Կ*DuwN&}55d~Ns cnI G7r+nG+ {MZ␯E(Ο;M ͺݚ uQE=Thڭ @|,ڄ8@.B|D!t*ORڭ@|,Z$fD. 
3='<;=8g>hX<r ~68,O&Xć2'ۛEGSD2Jh$_@R x!6q\r΂u)G.e*p&Iwt,l.]~aXTA@<#򐟮d'&YVZwc}P_@8'Y,7ɒ@#%F 8c0(&s˜Q R$8I#X bn!F*0*űS )4fR{j>3zXogCA0JΤ %;#BҘH:SN}iX Qi$uvzwF bĭe8݇ w؈Kll6V_g$FYgA[<,T %/@o^8A8/1gjem9PVFcLh̹ sV":)6M,4N2Ge;rlg#gQKyd*O[E<Н@̏K3d}7,e\96e\Yy)$^bj62^b^.NG63N#!ZI!O|#X˿P6x-Hw#_li;&_-ߝ߄M?>۵O4>?y+<W lbfaQPCͱ.Q`90~LHvo8n޿k*a5/i3U?F2ࠐJ}m&Q872 q \-CǕWXFVb zIюs{2]3 ʰ21>DmX;'" oȹA4qhd#RweaרL6uL)H$Lp3zgxu@䬕]zdbn~nij tޟӚooX'۬|-dNU>a6cG@.StBͿ5z[f ԾO8 {qI!`>7ֈ6:E|I"LX{E|sW P:sL} j6X#5ZqmG&NZa i58?Xjlq̣Q0M Vщk RL^5Z0\?\'~Ob94wSt *~a݄]m RTgwXOw.;ۺMo͒hА߹N)QºINZ2HQߑc>%5m NDRJ uY_^j%LZ6ۯyȶ6czQQ.Hķ^,o~<ȡ/^l9+TsN{% ۊ]=nQU',V(~ħ  r$?kv->Rus(m=&T歁WYևB3+L?'_fl('0 L:\3GԂG8oL žzl7^VAT(2@KLh05vWje/L_Z05(Z$)Sza+~떎-\ d9$޺ ~Lhk9dL'tPT8E"j,#EĶPsEĶ2Q QDl D]DMwRmVm*DgM׫CB}]X۫*LXjaREy-+oj33Q4K(j Q/9Ɨ:>S&:%:낤%0pW3ʻޒ[-`//Hl[R`EЦcv`2BSy7-#ȳ %V/z_bơ}F2"Y-Ӫ4DiԀ"h0RZ)+Cm.+BmExeh׀ZH^YʪW6+2>9t/Re$桶cB}Z\ʪWoL&28pF?>Us.D94;&ƙ=OM]5ݓt_ʽ qg|ovv=EV%hۣ}bpoͲH(]ŝ^'.GcC̫"q+Ǧ0~o'+E6Ά 7.— θj4+AA|GF,Ż?2$$#-ך>~$ } ~v8_NmZ;n~kg8q3y;e-h`xY:y(E.jmMQݨ7Q[PF)x14eR&xcQB́"׀[5JQ [:*04n P9[%&O쳉Ysbx䏫VTe]|럇.;ʰ[:1&PRv>h/Efoߤ[-q7Vxr\ůlЄ%ц>ZwwJxTh~>~~m yy2eVZ"tNigx!]e;Nގ:" 5isLսUҁo(#Ϟs՝p}Ne_Fx-+rԈG)z@'Pgp͵" E&c "%wYyE$?V^1™$ѱ) zɘtC ;`=`A ZƬc\qۺ)GX>2qlln ">Qٱ@΁iN& :5؉ I-bQFTjD[%{oXHK'JuvRŭZBg^kN46Ꮄ:GW۽'l[<&.OiQVkNx{fȫQ+[ 1[k=Wmڼ2P+5@Nj]N~륖ֳ}.$۴vPF۱gcF aDߓ8 &ɑk3֚|da$Cm{Z Q"Cm"La!Ȳ"!ƒsc!kǂ;Kx]E8dn&^L@G,P^Xg`&Ib/hꅤksFF> Bd!$ԊPd'6Euw xE檅{r<ɽ'D;p3 2V 9 D "c9mƊZγ1J"Y-GC 8j<R[5Zc<= [1E<.yۡ%j+..JDm;5w6QKD/;&Ze';TP&>` eEp,-;IjKR*w8wDO ,;`DeDbH fZ:3wCYDbn@ j*wdbҧdRUwBm!jQ*w8 w2ek3cA& bxA&{& YwC!&j.ܡrpĜFOQh-aX'6^1&94]sZ yĢiQZzct}rtIcMNFXk߀UB\Ob6Q7K&^@2x(ۖ*n*6G&nECtLٻFnWw#a`>d`́LfC&0${FodIc ߷-mK[b[}XWꩃUd,L43p* tB^pD'M 5|Buwn@@** lCl6 Ȥ2Y8 ˛"9 9w>$F~ 7Rp IALSD zF!D&OȽCJŊD0^Am4ZLY"t4ru0]qrOqXr?Se=e$ՐUPe7˿e`2o4SGGwZ6LTcDKm>}>:GGADE!E[pt~6.y?QyI5-(C eW pc+ GoW@]d*ɅB-N a:id?+ꊂP"=M .3:Cpaj:$!޿NV`#@fa&.~ ä%/ĀL'+2Z"ۍj&x+j.V 9{;#M0ڊ6պ|sT\e[LHEe+櫝NHŵj|<_nmJIræ.C\ո_oM"|؁ q_a*@P,FF(VD &Yd!`6uL^d2)bdHx RF$%N2bf9u=ouKQG5@SͩBFm\ox[ݷHhh/Cimm@pn1Fg6:}Eyns3|Ԛp1#BG!luFj Z<ŀBfx./NhZ\6?`_fHiV < kMŽ @WoϓdЍh ~Z-6bzZʵGQǫI,M?rxߌ,^$kv~w(/<[|<[?Gx} 2N,>=n6 GcwJI㺥N .ߑjlmMԷ>m' =ǖCٝj3l#>ٔU<+H :zF5Ul߾YKS-e{?>AVXDǤe=ZU5+q_ZrrRm8W AV9U J0ǼjKZQetᖙn FGC?\K,,QMaZO<ѷ1ܢZUZS|b|'ѷO5J} `'QSڣ:<0G¶FkΑOD>7Ր`LQp*V*B{VLkА'Q:Lmz7ZqV!JcT!8*|jUykА'Q:%uVnx7Ejwàčwkl)n=+{x y*SwwŠRd;U6v>T7ռ5hW*lM#ڱAE1v[wk )FnV_wא'Q:dӻQ݊-*;.ޭj4;`fhUV F&("Y i.梍LlK: v\,9ͻi /daJ ,*Ԟr`)E$mf!T`f!iA0EeE:/(*y |rY$D$h,ljNI6BN)VyzN nr.ֳ.JQk(scԭ8n`j)o4ua§RꃥȾU SEz0h{cFUbM9E ìY#[҂xa"Z,p8jѨ#F\c`Qg6Xd\6y0 ?-KfT3ݚiaf152n0&"FuI'ado$l;(돻]S!ZB)ih!1xk3ԻR K)CH{#&-/{4/tXiG0_o S+џȚ7D;gNX7[-p92P?YH'voKԖ%N٢'H.Š>Ī'N3WX XIڣX)ҸK#޳2le`HNlpA A fêAm)!$Hg; @&T,i0N$tM, Q/zݲLd:)X{Y/RՊV1>E5*+c?1[M. Nч̬hq&6lD;Mr?~D.**(v4s;N Z9:h:9*Gжn>O[.ܻx-YS x?Hy;grہhz/f˯C7cj*ِq&~,Y#g5-9|קa*O y|u`գhN ap`l> ]511Frtq$n2E SidLGuL]AE`$Gdy L@edzOD<(/ _dʒgQ` J[6qTĿS+X^iukQ:WБ}I*]YO8/ȋ^Y2>6_L`>u?*&/ǟxO?~:'(m0G9}Cxr>\^_y`o~CBKlQ4sZ3R }GQV{>L0j`g*wM $9ݨIb:Ij0~h`[c&ˏ4D#=-5X)Z?r>ߡa 䛟uhѻ\VM';yǩ^*3xW-8֪! 
ԙk<-_Mc C7Ӳ_PWv^9x}:8ַ`{W)2v4l D ܂ei2V@dbT9p;F,\+_Qw 8j{Dpԧ:/R: #Y&Şhe:IUqhyn=![2$pgi;'sg88:騌ZB UuށqOY TzSlƃC̸:T+2#+5Xp1m(EG+R* qFi$pu2\o+tMс W$2BSpa d׻95DYBd =QNRKÎYhõMRsᦅQ],Tg g4\cP# N:Nw7wǧ̅ )a$B R:cpRfwqH b5&6pdĆf.z[~In^L6[aÞѨaXXP[`HA9V(8{\48żf(ZŠfPxJɇI둓uRBi6IM(a21nQzꐮ whFz&XH@ sEFK3N󨛓`~>n<6| j5*dB]DJPQ u@ aPL})OD.0@GΡ%P4*t;90dl0Ópl"Y,BR -IAU(̆PM( J|A ZO=D̳3,i6ؗц*Â#(3EǴ e N g5ﭴP(8`[nLjz;S}FHjf_OVd*w-v j_ؚxa{sP{ɨ bԝE%ƌYHyz?_E2B-׎h׋oggj'n fowV6s 埭o!Ģ{‚EۯF|w; #&Őzz0z2<ޥ$9{f_Y"g[hBzTt}vHƌ$̱@ǹhߦFŜћN:&wVى|$lJLjױY`JZZ%BFevYAj9/ɔd%QҤ7n?El~ z@hC?Rc[l Jtւ|T(D:U@syjexNX}7>D$qr'Gw~h5!jƧևOϿ=(vxTQu'Qyhm%|t-5L Yd4 ҙZ3ZLc&f d#t6HaWF"ħ$z ʘ0{fNT=H@Y/MoGCz{WZĝg#FN` Jv zˉ^*uZ"S!mγӏv?\\PG b.R &bWw:%4PG1%lfyÚDk6Yf͞њ%fZGZ:Zw:sZ:au[*y)U$C}p53muQH&Ȓ(QZ@ V1y噖, J( ᛼"sV$&B]גZM<%)1 Y=!B!0µ&uV(thQ{H&b"@`+a=a!sO}ADG_|/ڪ] LjRmggҵZ&Qg6Rlu%rJS{$.:Cuu8W8H\n+S#ͳPAnG(@͉H j&jHE,A--~6Q Zw᫭sufb!Y;5 BVZ- a XR:堎O:e-NY:$R"mԩQFuRfO-QCJY1U+=`5%srV'IJ qwPюW+ؾG2η䢂@.SKg rQ3 lJV=j%X Z hЪOp.5zZf@qwPuZ (O[S4T*9ԀZN@%9tͪP+VG P^E Z*9Q%TI =-UоW_u x4>CLH(N{\hI "ڀ ErDuꥢ' "uGFx K%9X%KW:CS(Yyse+.]eR2Rd^&R2/vY:Ipw^YIǥNw?P J%cG6Oj[z̨xIYGܕOD]fJP36·xarb[lDKPRQA%9ʿ+jSL?B5q`ʹ73Pkǘy,yoVABPөvf&!yT$$C3HW]@ i$FBn K4{l8Mu^YUyK4ty:ADÚZE5%nwKR67OzfZ)]C"hCm"hAM zm9#hf>cxa=N&Ei[:mKm\p`*9=6u^@'C8`ZVE u6Im7s\tfC-iy1b!Ul3<`5rZ ȵ@0,ܚ*9Py :YQUry)##')r-{@쑤sHxf 2Q+6Ѭ\.j!krQ˩r%fK F Hp85NھZzI)Ŝ 8=}W:+%^ߨs-t9;&*\~8սW0\rc <%!WmmB-qoxC&:I4TĢk Cʑ>8 J#{}l9Wpzkl.\{eRZr-B]I <{ME B #5CtԨ3[dqrP31ucX%XXl[JPfrB?`5K$=[y|ۛ؞2y&BSдt`ulD|9%/׷8zdedwp4Oj ؃A u0,ԓQP+B`zyDSQlfPwO?T&gҶX H4몆S>ZvM6˘ ZSSRTP,cPB|[[O$i%KC>Y.硱݊ur % U&Xb@+ӁRjb!h'=x/rQ& wT{(8ܒ"Pnso{j@E\Oԗ\*%ivgZ1N]v&j]+:ujHӺ.q%U`FePN1bS泇'Db>n?/Vk3ؑ^~ 훟Oqj}ģ@o؞!ݮ%|8P"R:P{* s#{!o|Dꁱ_oTi0[Av֡AKU8cS~ЯA{~,=erq~Ϗ/J8r=?{iǰz\ so1Zm?]ђ6j=ڮ[Z=vn=[R #__F?n6s?>qG_^ʸr)CCM7N] 8 M\ =,L:'gN3.D;hޛ7 )8Q[g/RIԂĬ.VݕZdgW2Xa^ʸj9-W^frNWpvV{_q rɛjrOvxMh"G~l"49 .vgow"R|px/dqz >w_d/8in-_CK. L3pXFݜ\#%"012E(WQIx,4x5N aA&i4~"X--UcgVj:K#눲֨4s,f0pZZ7UWuU0vsŧY+bِ_-0CKSclا4(bB۱TQĒ ^EK&\v"FhE,"m3K JD%?,>M(o jwb,ʜ^GP&jME@ hP1(]~vMF&޸e-q[Rè//7rD+[;'BA'nW|rwRaU8MԂҼ`[j9%BmaZi P%$fzaygrۇS ;nU=xs<Ф  2E"b@j:.OC(#ZXEJPKCJg@J'V@Y0{֨i̢1,,x~(v"3#yԛgΌ]T2#-32h] @2RecnQ$hm`.ZY#inwekHJZ9 Voyq8P/>f\k0j6[ִ2SZcsW^BK^zl>G*n[dsڄ*OD4rNۙAޟWipk1[FշWr]C\=".jǃcGyo1:z0h\;DqѮf0mskc$¦=^x2=/0X!-c<4df+hIfuizqpϟssRzrz؏m+/~l ԰65iy E]v@I~^Qq&"j]yqSAI禽e펵Y^A9{b>#'*vx%mo.Y . j;}wRϪ\9R`T(V`"]*Df 9Ouk Ƥ9Ltc= 4MouI1_+k.2xN*l4W9"*`n&^D4RaK<%}973,T_Q ,s" ] iu?SwE>w& #kݞA 7L@N:'; qZBnj n 5f][LU70Ԥ:KZw&PӢ5^b߼ 徝/&T}6bT_P1UӇ=3 a?%gݸE=ojѲSڷ_]˙-ATkBH'2[jUry"nNd0췧Xhl9dP' ANaT}ke솆Q8<3J 2 nhtC?"CH5ep肄6ED.:tGsgpԼW q۫/|x/C) 9R)R06ma%…ڂi0mα-hN(&vhgG\y(DP AT/F*mLVm>ԏ<)k1~_1R PP%҄j. . 
\Z6r5\a_]mn.Az9x Z, 'iUӴ^ ~v3/Q[g0Mqz<_";/6EK|skoeܲro4I'-h.́fƟ|m˂-嬻4UmYb> )ME<ܮ^!Ik>M=KɃs-HxcڂZRК^)j-o|=BHǙf M&s^l2ƳaXV|ɴ^{Y1]KT6Sx*ˤ.k "ߞi !Ćh5A$fC]u7v~s׿*~AjrjZ&Ŋ>BRFjaG֩[pɧt}s()wnP74c/Y87I~~X7ne޸yVM}eߐw="h^44f" XnDaL1XHeV0$MɃK2b02KIӁ.ZM=0,o Z:axpRnַvr^>RU>YM1\;U@I,ٹVS[q+ZkRg(cp%"ȥ,*l!jATkBBw`?}K}=#%Dqq5G<#%>A+%I7h"rƂO%`jJZgs&D`EffjuroԎ71,¹g\̫1.3ӉUa=q/WozQQc᱋sM/Ƕ0 "pӽ3yKؚd:p; T;/׊|gN?s}s7h;7f]i/%Nn_|rFAY"X1YtvM&G=pYS.܅"OS}Fu@, bڨNi!TcDDՄNMQM9DRZ &bDLikOx?'iti >ϔ>J&M,G"",2c1 a: G=y2(^F=݅.^͆ƥIN'_.L.6?rpXLvsodǥTj+W*W*WIKı|@L׹ˎoSa' JĪkp,bO  9I!-`>"oU"e~f.!츖7C^^hyjpFEcjV &/w wՑT,{WswJV0ibljWu8zT'^leX .%0r;: #oŖCGSETTKc(:"sO՘VP@: Zn¨l9tDUe+6:<(R:a*Q0)TLeykXn\Nb S9jFi'L@mcڲkT+Mh*GSyUq$ҐNaTK S{TTkM壙WKuTZk S9j0è&VjTgy:&$FS9T^5%9gq;`*Rꀩ4׫Tkݝh*TU'=,O݅r(Jt!#j-DL`MiT~U5a"Tr4WV~RA" S9jx7ꄩЮֵXx4|hS =EP0F`r՚N!Tc섩LԭVFS9TOQE0K4)Ւk}T>L*+BaB;a*Re'L0Ɲ0èwTSVjMh*GSA@xQSzﯮ679|pߦ uϽx?rާ^%5Ms/Qbê Yjj[jUyl l⨽U-}/`y|&d2x?ig&¸ƟmȂ!唻2jym A1\Þ=W_vMls5:7VՎgwCK!M&FyDYO҂ fi/XSʀ5,-c< cN8:u[^=L{XT{3a xl;6ޕ%ڦZfۻ"Cp,|Q5M-]|", ʫ}!k{bKG,A^tc7ȇ PC;|2 {P؜O X5Cd>_snChH3uOA0O8 - Jf plX[Z`F:LbwL B ^j/Zj,ܡ(uwjW# b;O(B] |JZAOF8uBG$*p=yW"[.A^N]vˏ/?$`4|7pr->a STe\ fι"+"3 S4Min&N/` vg'P-nW+h~i!F%o*rcJsS2FH3 *l)]!`Vh|q`d3Ȼg[- \oe 2H_5Hg],ν'1ԾDN rAȷO~X\)Kk[5()wnP7d8~e1hlh~ЃS7N8{SW]{&%!BhT„M9B[Zf #s\XXƄ%ټ)9p\V-6KiIӁ.9$ DNg`fZLa.-{/fM'h@jE22Jf)MQRZear.ULBTs 0hAVh}u?/ o/v)`~i!\Mwb#)'yḧ́,swԸ4i0-rW\6D A k%1$2XyG`eLb(OI3`7[Ed+@RR[QLW`jKm"dQU1B3pZߌCׂ#eօQK<4#4l,s +mrd83ʹ-xnq#YB/{}C }8+Z]&俟j(JE-.XPDE!b%#CPyrz3$z&T.bn@D9@OM 7.ɐKdkr}~s%C!^;Sy7DgZ|j6"R"o1DE">upw=AH JPaŏ?(0T3O>bL%-'LlxFZ!$|;(7c9T4V>-5k^,P9ͳ֭ _rNT`*bOuj @ ]P0h#q<;4.'i8^~m{AȦnT4pYg@6u~ :@<58!MOȦȦ̲Ѐ!Ƕ'eݨ&HݷUP-:'w;w~M-nد\މxP :LeZl9YқeWȔ}+  5Ϋ5XQjGlejF<_{'?}4:50Zb[F5r9Nzn^uZJX{$\>=d9Ixopvud7HTK&8fHg;OӸf =0y2av<1Dj?_jCJnӝKq7I+JRv{eYs) ~EJhY+6vg>Mrm&iݣ=~LܣҫB¢OO?͜N\2Ipй{0(U+ZӇx-FG@0%uc\?Y`"sL}ew4X:yV1R\S& [뀫xs)&KIV .Asu0 u9/Ni82Z2UD/b$rdϟmCޒZa#N]z?);(kU6#xvGVZ3.~&kBBRxc6 Izl$h7^9Ё\8 {APL4,jМj5ET+ba2IU89%ej`?!|-Kʂ4.z B= |r@\QY`yipQO*xu(g=QPn&taOynEo>>>c l̟yl3MZE~:SbuW5PBꁫH5EBʮTWA*;SU\eZRM`[ڊ7-%KC Hㄆ834aȈ c $YB0U3 T ԙ]X>T+ִZ9ў:Tw1O=齃'|ob3q3q0}TBI?1fHmSmB4ŝժiVw^MR<:y΋[)>KGHB5G#$q6-l;$qaُ[Z]C@oBSHr IqPO Ԉ?i^~y X|,!̟؇gwa, .g*2A0j`g,q8%yDž-ޒ\Q>$7)m*B 强X v?2XhLCT '@QLIzP|\:kI*bDk9_V v1ha8Cd (g8WRC|^w,ԐrD2dYR`@^5 wd0.*m>]|W|k+jzjf/FƱ~OoUkn.+pɲDSHI&?!ʔGYeAV#ľK(  MMRL %Ⱦ?\]9&Je4&keqbB0<.ck!,pl63c5B?u1SYx5k:K֫(sMWo?u|yr7:$Ţ*|˪P>PܧC$)>|f\O$*f"x ;DRYIK#} k4G7^mM1W+E3ޛ~QݯXk?Z dLKAըg%gjD#A`H ƂGsDa`H")>l o8H3zcnf{4+M*)crf5%"Ef|j 5/-_;S$q7r@!yFe6_yoyߘ]a)%ejI,(R*ރš?.D֞^z̍GQAya|P{jeKwSMʋs#boy^\צ)GĦ'sK-O\LX$.=3‡C϶Y{L> |06 ѧ,{2~HYU-ě"|?McDaSV7 ~L﮾z/?&{f qP1Op'=% V*Wr\xL\iU i_m+sKN@+sES=jPLIZ0=:zl!<„7CW9y3W`L\J2K7Ցy-a1m te LN UpFkO!0;^ $BXv ڻBqHtt<(!6 +ǞҤKL=I HD%= W##*"$g8rgeY? 3i/?t7*َ6?pֶ&T[J,;Ҭ@4#(Im&py JE_2+$:V> {hu1p/*aNt__S.<0i?҈ghkEY_= Y#V#*֧3-DL"] WuH^9C㇩Df5Rzo*!PWHVѾpCx ȡa[^/g)u ZEִ$SS QZR8 ?#D-lY֖Q|l9H04 [Fl*'c8|lQ nqUJZPV Ejk[@89pX*2HϵJFJ+Xxd[%7&/q5mY*iv%SdB5 *Ub).m|FK rC(#6yIrX0qCNu~a; Ud2;hll'>x5;o-?!>o__;Mq{EKq?57o~O>oW>nt0{;q;9ejB?zOXUWz4/FhSTOB~I~킽{<4ԇޝ=clд`Ł +.+R]PUSUK߻|qVGC1k%x?tt cp?GΙgṜIמ3:E"JvNFjboMڧWQ/nZ]0q߷rn:zA154pڴ0V0'xA $iV&+4'HltG\Ean ]ӡ0D%Q"v%rqyVšJ$Q" j r\.%rDckf53 ẗ́6JC-$Qm=jJ(3>0>P\sM2YXF,~ԗ<[l g TR~Mi.Ģ6'-Dm(O!Z@`?-ԊhAhAhDKt` mqSW1+F@KV+9XT<@ ѵ ^*>aPapU%|5ebLLwPsFЂq*j$ Zv͈`uM`}*;{4OÝyX;- <HZ?Sq5<>MCM0cW!L!è=`FWV*)Jb@g* ȒUIb\b,@Դh&8ZQѰ&j0W4䊆U40؄vțh>q5SBeE%P4VEj Z:8r 5x MeAx8ԦᔩHF$BmH%uD7`>18ZMrj r=нqOL'NUvli!24[PmMj^ZNj!  j meR8-iB1QtBqLC E2HP"PU4%@VYUdUUEۈ1'd%d()ii ju > ueƢK J51+f5%Qt_gl_aMw}U]3heB VUw-RP+5*5梦Egfiơ6" -u `I(8ԝKVP_nC_Y*!eUyVW9Q{33͒؇r! 
Xꈷ\@p|B).s(w۸"Bui+$CרV͜(qQ8pu*z *,/V@7Er{b۝$fw\LC'ӨIj=@ ϺAg]X'Rv f~<-Oa-7ߠO )CL3} B~ h:m(ECm$~B 'IcQ$ϑ;U6,=_s?Pmծ@7(ܷ^kY&+tDY+4fUZJG,A\iOiUj3G, -ZAZ(54{D64 U[-ݙUEVYUdUC`pYs3}]z6h^Yw0! ThnSr_Ovst"knOj&!θl{;@uR:$(JQ 0aT5ȑȩ <*kIZ?_2tAR$6ʺHmVRK#BQ4r j+ # Q%Ԭ,P Xp)gb׫c.$CTJhQusCDsԊ禵^'1'(Yx^PEKjA3 (}TTSeG%0?IU|~"fYkUQ išɚD654@-[899nD*^[0+{9i"h whw:u6v܀F; !o3{M`0r?G.eo#߷q \jwoKQ\Љ8 MFϽ zƑ&MvԌWi-R ]x`` m PK%Ёf=-,\5?\R8ڐJCM %44h4j ABdus4AsA8@gjѨy?-Je9k&LZ!?%u6SSpn-[ƍYf>/gs;p~az˃?\Moꐐ{w$bl S֔u%D)K,;>wm{Wk;Ef3c ,0ԫM+]OS1\jf-P3K"~(dž|y - d_LWYjO]_arLxI{"dZHÏݼ#cLS #!{_F05Ũ&?GZsKnc mr{\mA kz@G}4.$Pj)P@P_n6d \S)mJ=e0ڦ~@aj#DH 2huΌu9P9PnD Ј:(^=LoBF*~?\NmÍJEԫ. ܯQluC3rU?䭄uZmQwlehS-msM $ܧH]( >šfIOz:v#j4O}SvFD|b$!P)"U]Vr,L>LŦj! $"P2 v>6L \,L29$fX "Q[2y? K,L>LYTsLD-XLXԂpj/5$Q&j`DderY& 4}D'!PIdhԜjerGTT'!cPSBd=5 V&ǡQ+)ZuTCY&ge2ed"55KqFWӹ- -?!CChcQRܬ޸ٝ-9O.;3[k}_G sԄB+Myv1h^ލ?TOq&?şw7'θA2n/x}CӚPV+ PC]R(PPZz_j2} 83_{ oX0QO_xDZeޤ$M1q* 0QiOP$B^V:7=~a _fDD`"B:} WƴND91he2E[Pm TrBHU QPXVx+)Imiw ps[&l ^,7_{&:P:Q05q#Bw, h4ܢ4PK%YԚ$8Ԇq"uF5%" -N e e-*;nX- ,czxdMjh9,^6,_Ӑs6ـ \SfBWbϸ]o޿?nK]5gUg?û܄'9˻7vOm:H>Nk?swǼ\YǪq Qq5pRu~oqWnևp[wR#3}Gv6yz _b{X˽j6\ĽEPuA ;+iE_'C+i 楓Y#\tGPvc1&<'!hZfT&wa!<yagd-^P;ox|{_fyǾKCotOWW9`ES'*e/~5 yX}X$ݴt7/ G'GV\|빯~ΎG?>JQn\ XKj!-3w޾7i~+r<=1ѱ-Bc W3uajHvSxlо7UDfIoA̿&K?*y{1|i _Y<|_TL <mX$9::xkl9EON¿Yg1AOKq \2lԁrqkFZͿmmH{bώAgoS1*TMdx5Ig7|Y ~~) Xcc6ܳ [.}>_nGzezZ/O~Lss[3qAVt`ޖ4B?k#[i П=H,{-t־]$?}Q;cKZM{(T-I 㞁p`sfP43 o2&jn1z%'F r& #PpMwԻn. Y;zBZkʪ,*MʚZVaV:q% p틢k2 Uq8D#šsdVP֊ R⦤JG %+'q8pٻ8r$+ 0A2HӮ;v൭,yt11Ò,l,ʶ`[RIq9 JKD;Bb[2g}T3QR9k"Sxebˎ\"0Z˩6gxl-jDH S& /DDP*6 4Y܈xu?ҝIhMН :oJ%.)W04 P#4 (rJeݠ|{V$~;ERd0>T+ipZ[M _e R7e8<,E1-Ut{@*YIuQS wb:MEG!Ax*x ٚEU(V.A( t:$,-+ jr8}; 8C?@`Yǂ ->^`$$ SZLdaq%pZEaCxVVQ&T!>jl[⃧X (Tλ-'cZ!qf"#R RkܣmBAqM%Y0)RAP* Z @HZ 'Ś5һ|QJyk#'nT"MCK.ӥ~hȔk+ n g%V]Q5b+*{i6 gy;;r $]} ==/)AsU \k)R, A\vJL/f`Rv9ŀ]~pmO ȡ.16w,fᦏ!=(uxCrDj-UvJI'JxGi"Jo<lHgkه(I3}5rol] [[{R43|>́xc!@D /a |`=Gf&e8ސ#-jq+y4- #r’&Vj" I6@=ҬR'RgˁOIF=\ІUкHQQAOe{ Fm)} ¡dut[>{Go!$xˉU,>ʶ(}}:pu_+Qfś g5u-ُ,-(9͒ZqXKcEmAOP۴_Dma]>s&=%Vn?|;bH7~yyꗙ/c':{$`ijbϑ~0b;#~bijLtҷlW -X )j#mM3b\mC:v5j7$l | TYK9s֊.F!)2qIl̐R샸gvmM]}qSW|nzzO |RA=a*#[j7viyo۠dnv|̺Ad/fI ca=̓~=̓ZߋH!qq]~j{؋9&fp"}G{̓4e+e=d}plCkYZsP$ )M<k>mmY~p9R[t[7dU̒:l xbOXܺPQ2%uP%/IycZSdK~M {;SΓn֥RL}z WޜA W&Fp Pϱ;8;{y 1(U_%:=r]? 
<κo34󇻚!K*3!GTs@B>Z-Tk))(oE;ڻEq0 CO \!#+*AZ%8db?y$퍡9 i\Pۘ~ԲaGzzamM3*}|IDs G ɸ{9 셹ی sHTSa|Ħ:r6]B.:woԮ 0=-knP4h1RU'^H t]lc |g9|Pe;l# m{h̰#Rim235o`>ۊs 9ޏgH ecԦ\IH<m2zϫa={k/%־5`?қVMxs}tN/^w7y78F(zqxRhC?WoO<ģyZ_#׋"_>M<:^o;y@Krwt0~zOpntG`{Vg$O{y#P ='g.k@mX*B,{BU[)2^4:}'+_| w8rrް15ԛs6٭y|zV5_KWm^n)kr\/뵿?w'Vz ^|vu"w~Ue=}qAc+O-͂\/<Ԉi/fI^ԀHm!5yR{ejX S\}wk?;ls0jEfwd7|wdw_8x;(gX쁈=\OYG;cst')gpr:=l괌,mzߍ, uJ븩 hA֡VE\_vʟr-5uٿ2@SUz~*%pDRˏD6H#Qh\А\șsݧvnY:\P}n\藌`|{1w l~ Vqi(Hm7CB?gfa6{mtӻޞ;rwv\ aݳ>͆jyi _?;,| p6z܄{}[m~>xwyr׿)z|'F!b;ttrp۬yc(5g) GCP8j}% D 7pc(\sβX$Wڍv6 F IDB/LzPszAagB!B/ 92h.49>q&59Px-]H+(y>ӅZBZ.,ǘ!Ҋ!J+0v(h3L(Wv۽Fxc9Y |ZJf 1$9Go02EE4GrmqjjV*2 3r`.稼x.* o4!zYR֢n\c߸ICkg^^ثP>뤡6PXD]P+("j˻yu[#Gax0, xrIZҸJB2]Uea1FnUG]ꪲ"v/b-F]UiM-uUj[{V㺪,LJJ: [h-& FxXrb2Tc=;1nf8#.i>h 䑁p ?Z&YhMOI"Z&%n0gppzˍ> +9C Lk*Lݷ2<5n$Tt0f%2A0!)l)iviG:%Y28J=ӵ8ޫ묦:!J%cvGkPv9Ԧou|bJlŔً) A|xٗrFGZlZ>=jlb>Lᡷǡ7qr`G9vǏc RΐuA@Cm뢃3G,IHO],9̳aJ2ICBVf\*xbڰ$h3X 'A3f!j"kYx` >8sq%Y pt[o׵O5ixrIdd~LL0 Br\, :E}3Ji$,O=2R(M7iQZIfpWuVͶ&ԹWy%5Zw7]O(ijv6,m۰|m`}d_Qk]y U8G^-#GC4tqPkV;= !١Ə8ơG=!1 jcx 52o<H-H?ϳqKv TM?dvk'ܵL9i%'pEÅQ&>O>ӱIKF-%h|WIqˊh)}b#>Ջ|YW9v51̸5q|kX_k_@mDd;^竏/&pmI[n@5!tq3="j`Rz>9@_7ƯkH{*w6RQ4RQVڦM$eFShJMEM}xs[0ޚPK%.-~ JL)Ԥߞbh}+|s^ig3h^KZ{3ZMNko& قZj6fTZ.B4M54ʤj DB2jكZr@SHD `z/2ZQKV[mjq?Vj1Z-FՂ͜p!!B[ND2 ŨM~$~fU@0(ƫ{ bWgZ ?E8,s63.>Rj4DA) 33t!XPK5X`U Vqˉp(s}|>ā4!ֵLSOE|HR*O{ߜQaG0rYC2ù.$4!av74އ.j<Đ 됰R%V$!IG]Qs<2Ŏ: &#{wut,jVKXBB]$i ]$i { jW}k&v7/L}s צ}DŸ-'XXY,2~x*0yrz0E T3LQ&O)L/wO4QOf 0B )M: 8&a(D"aR[і)3uVBǶDWs^?]aH6Кmmp%iK&(d #ș 8$^hN.8M<8k-v@%5_YMZ?jA PIW{ jn5em"u@SN]/#-HR[L!j˛K%_kH&N> VF&PGJu$#Zf5eUPsMB]MY[mje;Pkj5+*tOԌy5`4|&%slP.qAC'q#.h] }q1F\:BZ:ʥU?rI}D4ԖPCuk (wD?rgZULB.EԳjWBģEa6C^G#1 u]4Pkkh$_fNuB꿠1uPV)GOFT0|קi}p|Ef` x.1>G\wqh9YD2Flm a Գ6~Iql䢒22L]B%t37yX`V ʵ^Zj[X;w/\y[~&?ϯyY/dPXv[4_$n IV\֖urNPk[lmCڅ].j`qvP5ԯ?ǃEAk̬5j)BJC?)P88p ]HhuKh FD]K戨As] Gcؐ ܐ={[3dפъZnHssb. DF"m(&92~G9$F#@2Q7㱧| nNȜ{xzx[`@ -"Nަf&PO FL_Γ!Q: V38yp3O;"k}-zjg>VouVi{uՄ9<͓V*S્5ꍵYN:6Ef|v17Kw q}'3ED')ryuJu J5{8IouL 9k|XfU jJ1 ErUc{ %B)YY;I6\0GOweH[cqF23ċȧI3Y^q׸>*>$Z1 vꅽuuV0պNEC]u®=S:eSGnTFW6 xh5gvJUPݡRchţ lOIHjv>ڱPl$8ƑR7ײNjDԜ3etNNYxK] hMjLYx_/:},'z*%- ypp)2,36)tN%uTԼi&Rݸ~[rV{">IE8 5uzA-kon> IR3 } 1G%(儛94 zvϊ UTnB4 ͓-բ(YxN_Ї>N"t[jO_j9VX:ܡvj96αsLVBR iDP; }\ACm BLA]!e4ԢlZ2BLD 2 Ҭ ٲ*Ԁ,9g2z<]mu`CT?DCT?DCTz% i\" Sw!R֊w!Rjg))։Ua3Z.E'Eg*l]P;Ggqw6”r}"vqZ>,Cm)5슀+!\VKAaڙbI4-0EF-utG}rcYG&GN\(HS腝 |~1C` ,x%ӛOibmo,!m$>DiMܔת[sSZWknJB-PC', Yނz:,oTS_oVDԎ66USQ.'TԺ6iTxr?C{P?꘺L-eQ=E YԐEȢpB'VRu?ꓻ~ېE,+[<!3~ʢ 0arہ,~'wQ;!(,P(yVE鉈vN 5-N6.=P~ƥ'^HՖq7JO4 4@sˉBg nرp3]n10 c"gnbxM8Bc4r 14we8$W.DB-jkD 5UP׎Un*L!XPb**]_N Ӹ@DD jiPP&%jͨ?wUcRː$ IB$A#AkEIӅVT2ǶC:􋏧_pOɭt\_eB43$Kz[\;pzq~>X?/ޜ._D#/ciHWnҏ,ו1^'4;q^ɀtBO/E"٘9G#M0v>L/?fle[ǕX0K$W羅FNވѿLx o6_ܼq[6D_?r?bs~7qcwRD">-_ײ^{UvW|lɯ//>8 &חu^NnJ`q$׿r1;/D*zO E /-gH/35UuNUP&7xr&Pf034! L(;U<XCmUd̨ͬc0B. 
B`3^Uھ1Kn3^ϨSBo>Ff׾}fښ֤vD2%Mn37PkP::%Q;D :\M:8 tPciT>,!:&_܎NN/Xg>QFM}Dȣ#m~pQ7&:E' ?knQ${b Zt2@?)*RlL: HD33iGZ2 3=NgY+c|mwېzO5,4z>R~`H Ar꽷j8z$,rPǠ Z6*Wo|')5u ~-< Lf0uB2ĸco>WZ:xZ37G7 YVP捅ںҋ ,^:4 u(za>9D"'w]Y@'Zn0Q!BԽ-@v4I&$oGV'gib:8@MmF({~Ya5ri6ji>avԶRRDm-vl]G=NH6p^WhDW/z?پ"E&`]|FQh4IٶBZ;&ʾc۴!t1>k{pJ2j#a^1cq,f82fq EET\cPuҎ~iqCvnq\>|_Unq:WjHFx,RkL=ƎԖlz'PE-< q\-71*!\և^'Ke0VH[c͠ު~h5/.`LXq⥖Q l^:D;Ew;͞Q`C.sBhW٪uTдyon1T0;k{GרH$Bb(G ~Ba{j:.D)!xF M@sxj,v)h5 :ЮAmo+C݃)YlCZ_RFBOuCQg驽 .e {h(#\m5驝(u^؞;u}\x*fP.uY.<`cfPdrY.2eD s1%(482*ay*ayC 7rP*o;9D DAO1sO$:䧀{jB姬UJP[:W:,?1ZO[:Ok`4 UȓxCAM!$ٻ<"OyR$xcȰm<*<* w<-DhQ&`ZLԥōǦqF"x:Ef/=!x۸&rV؄NW tHMشvh5t06M]+0 WEXcn..l]`lQ#!)0\fw3TĶ`Z2Uy)B0C\].dB zY`P"2;TaGl+t*3`X0_DMr0 8".|rD\X mPkeMk.\m5QCmq Wq]'Eh8)Β'm"tSRXeFmk0{{셲"ʞV]SaVʙ 9>9@@'՜gU:Z!E㡦P!EC èOBN9!ND<קXB9jT8ig~1(Jsr{Üv{jiJIOj]'/1]P B WHı\9S€8_€::HO;Bz YFzjtݑtܮ`FU:aҷLdž3$9R ڔށ1Cm]x# !TPBus]t4Hdlr-iԄIxz~vs qiw9~^OUO?v;;U6ofxqxyO߾3:AkJ\m !ڗv9}"͛L{ /2_2~'^گ9FP-5|쟮v??y{m"sռqY1<,g(xn4дJ]"㫜6篓//n|~9mӫqN8}/o a^A ?޾#x+vDH/;7nvtݻkool-k׭Z*=+mxyWUP-Q POGB ɖxyUP< 250i @(^x-x!UP,Ԩ0VAPUPYQhW(2<akC:ٌsj$*dQU @A Jb)^Xz/5)^#?hPB +')5nOnԺNɔ3tAiw}\]QF>y6ծOM^.LO} zk,)߬_5+%zqg雉TdͯwYߞ&]GM3k}fcLtvgi=l2$3|ѕY%mCۙ`\!E=^륻be^\[T@Di(Yȕl, <z,k}eՌScD]1cC PX[D֠6PRSj<SL*}0:X}$VWc%\a6ZK Za}7V?t^`iї!PGev󦶎cz 5:%qLı8f3GyitUIy}UIm SEH*BxBC]m\6Xp K;D$ʥGQ.̳t} Z5!,hVC]Nn W {;w#&D4! MȁT۩BXf*g%-JwQj4XBژ*PCC:ʨwv:yߥm::Z3yTqe+LRgiens~*pV2ˆeN3PGay~2eVsEC]46iG4t0Cy`0hjʙi:(ጁMZ$ý̇tbҥ1Ccl>5^p1o2j /l~o2c PPEz$(`P+ 7.`PG(`=VRuDb뛮SJWAPk[= &f 5.BB "Mfs.O0-3jZO{j%:yg ^Y*N) TARkfޖw Do׾Y n:[tIS2}3jc]4>(,kePT`&QDh;CMtߒ5mzptqK_v!{R;Ita3I[3%T*}(٦%JLM|888\Np@ pS4Pj!UJC!GXQX2k8aKJM$\\ eMF%(Ԋ5 O&v8 Ȉ z*pCu >Ѝ H:qAZ(݉ 8ԒN\PEH"t z*D; :p&h8 j5ܮֽ( AOjqTS9 AMYFm;Q9ixʝ")l$pW[qolq[xCGGl|-Xj_*UvJj! ZrщP0XG\HPXbvaZ2(8PM&7g\p(Q:I |"T:0JȐ)D2\h"yP^"1R1^*ce%EІRŖUE8U; <1ɼ(4'֟g 235QUJ V0U,ZR'ycӨZ_Rj oLf*˗ 6L@="OBy .]Ld} 䧩Y6dGZVL440ڶ>A'H 9] jjg:T`nTjSbQ@Y;"J3щHԲ"d"ҾMku.vA*UFB4y$j86 XԒ\пj5'}A}A)8g]8gE_]X0z{>گ$CUֻTk5-oQ>dEnzj˨P3}[goO}:$]8>E-teWOQ3"DOpjyΌ;*e|yyy6Oܡm{7ߓ6|z{=t?gO+${Z.7UANs;)CF99q9)Fo݊*v:N38B&|#%tǫcӜ"~p丏f~dWc|~rqȳ ʗ?֭Vů/Pڅ~-On[{g[h4;n1% a)r[\ M3 JYp_TU#xh%D~ߍc7oyqj}#kޗc᯷ſ>D}늰%1[X.ye~v^eX^;nw 6@vצMx u+"k%2+z9՘vhuOY^O$PԪB(W+Ye׉r@!>A)D0YP$1YVJF2C/K*3QpEmWExSl|m$) <ϬPḄg**ɟݹÓ@3;9[ 3/}3}0씌(̈# FU (6$D2 cF'h\fp%HCY,SCs JH`HC쩦k9#@IJ!if 5y B,s%`!C)ƫ\"e2R72.\eS{WGW[?u1|d17Z.oWU%4 Mϙh H &h]nEkZƑSމ8ԦiƖ Hд}*@0gYmsS( St$l܌Q?RO,>֊>ŔJ/ p-TKk5Q,qaJFrHf Gь2+T6TU[,FA7b"Qs @BpB pO/((vaT^fBJ_yȗǥoL}4b5IMMS־E -D׼dB5dֆ"GG { 6Kj"v.cBR<+G"݁6+Z`DݴALq; ԝȻDmL'(Fj)ew]]- 4-%^_gd&9&vsF}DPڍ8Tt'ƚJAzXb#nyPQ:]Z&: 'Lr tqgSgѿJ"smGE=o,qhZՉ/@M o7s4hA+R=L |ܿm@c V4e^u/.Ƞ23~92{q-g!ٍpmko#\m xkFeiBA_:J ˜V'J.'t E|l~VrxRTٹ#] ~rp,%b8,>ڠɃ7ffM1ۄX8FoaU8}^)Ж6R#9NږN1?s+ 6Q$Rx~~z?DעmG H>;qT/$BH˩Uq_α<$cH+sXzhщf౨%םhZ5mar0XqBmՀo7 {kM"w,Z a(x$3Rhʔ\:FtD&Qe1kP3`ZG=}:͍B(mv*,id @@KhL nd #B83AUAIjjt 6 ˍ =\i4oY_&<EU*Gn EglOpmFj!{xA 1AF߮"/՚'. :BS1E՞e3:Cw$=zMy<75"Q>cCpzq~w<|85X{Teđx{mgDpejT^]W6+C` тWj0}ܞ@eSHNH~]$-{#:rt̒~tvv9u| YKx\j\Zǝl _5/F'ͱC,?'w7ϸ,ɔxc5 ʎ8\N4o^8ХefE[糟S`@7˶vO[?PUhl!6dε|૸ bjuDDf&ވiDq8CcC-u8`$j:Q0N@[ց3D4(I>JAkG'|懄_ET*V9k]wÜ=7:>MӒ{fmaSGǐ= OAGT_?]*fO+}\kЯn˻tvC3/vv~3C=dn>ymn)IE"+%%-s^G.iq&:EFMZ2q$09WYdĀkC1_VZ ) RC a$zI9F!NvkG2]$|[&Xqgi!IihIh,i1X[ @VgHvZf GK*cTQrIY4D02*J|9!&p貉l#*ҨId9F5ޑ3(ԲIU2SP+'JXI Zq}k9}C!q5HPuC#|}]Y۳""X$H6";QSm.j>Dݺ(ھw1]bH7"d j#4hڐ'-U{ jj}[õUSpPshEtъCMI78Ԃv ~;Z;Zvr? 
v6( Z}TmH `B'Xp)O.FhR1] k EU8x9xk 2Z )J:?ZB%y6|&Vz`2{c'H,c!:ύ@W[+bI@ʴ9_M7p-_c/[=V񕃳9LUS48#+nLdj*{Ut*.iERV *zn ܰlf{_fQ,N\0 ZiY,)Bt_}6[1ZPlZ[n츖7} >?d|]{32)MWT)Y\Hu,&nJ>b"y?lƎ.\|/3 Kƚ$iuK QURcaj>Xբhy*[ͅvp_aTe+fE?KuUSD/_+G*J5{%2;ɽ7(b`hPtAUjմit ^B}ZT U "}Zsܠ*pWHv nLGBƯ$ #W> uP1:"DQf{A@ӸFvQS^oj}+a)ɍȰZjC^ >^jvFJVz:_A?{ϹVX]؎'5 T|vy#ܬ>H¿yZC5ċ{@v]:ܬ,K#c`d~WU2H6tCYֈV[-qg΢I<մ߶t3\tA uRLjnG4f$DLnmpg΢I_5Sقʜ+Ý#؜O,YaK@UӼ0i7Hד;+|vԓ2[&z˿%t1Yy=<{CmcaU[cUZƜ[N[n[5G# WF*aKK egY),rP LJUK7I$y.-@MZ,?;H I-% 8(5FJ2plkT܈_Zw{Jٶ'WQζ^9WF|jk*ZKE|{PԅAu;4!BH7iJOVEģoU V a>Xx.zѸ=5'{V=>՝t"m{Z?Zmkp0j#E\p0L8 $JSpNé~ۭ;45:M\t|/Nxw<2>w(DF3^Z)Dd- l)AKUsea#Ѯ%,XRn@H:@or&mSufiZ8QM/2PLPyx$q2T\ +{|D% 63鈔X.? bZL^BY9UqBQȑ&F#4wt)C1 \8"Lyf1SM8+E2cJjw#Üm`OSWֈ[o*ڽyzPQ[ߪǷzMG8+.d|+M(o/;__DawR?vbhz: qv8wjPЈQRBLrߛHru0;HQ kG#r5lr;>I|HI1w?F_ = s|JhAI jZx52cO%v>Jl<9#[9߮4xSpU/0𼑷~sz>A_>[3]}z1(1g ~Frt̔)~#~~x*q7'rrcOWfcC$t9DƚL<#pp`O#(p&ZDܚSEKgRLεJf .v<sw2zq&XٶgGYa5QdenT\2+3FFZ(2PcJWjODvguٵ +R_b6/9 [^ fEe-rЍ2;_~(&; ᢦIUgg*6#\{ \c&C[;;>;$޻vHm߹=js U HwV?xy{\iZyV#I'{RO,ܶfj Vz>_?Wv tVyds8M^_]1$|)ܺ|{g *xLa5QT3C=EJL|LN'UMb0"5F\Ѡ4 l˒sa VH0"F( su "A&u&ȄJFQjsGj(CeEuEPPFCRZ t$!F%MR\*KrT. KQ^ZU&F\tRp%W\aKj8XP:JjݶN ;Q+)+m+ {Y \CuZ(\A5b: y j<6j-DC/9C/*/IH::VYט(wm5뫽aN!{/`2>[J^~JecRg:'û^vfgD*N 4?]Lʯ.^8|Vuc}&p~hSy!e4aYtb\9 kJ<=Mm᧶;3λ ϧ漿 ^|߹&BR4AMu<퓧ՑD:JbjDjJ;FT?Q7U٘jPr EZUS g^?IS/ak$zMN.ߡsI+F191 H#M ݤsV k!,() d xO&YVJCXsYdfEe )@]D JRЋy AgIZ?y@x! lKYZQN9&y.E*sm!`v-oYYnPF%ȥ XJEi,K(8IJ#I 5ړd 8eqaԚ\R!92Tf(EATi\y+(t"-.)ZEQkA*`(jMb  F-ΐ j%q]ԊS_=a;3%e^$)xd&1м"{UyI4/yYG=Q̔{NOjšktO9.IBls}0av[ϟ~"o^ N9Bd" EBGX}v8x|i]]浢WFžZ~m4lϏOYgFLf';h"O;j~j1O3O>gw鳫0c|JD2U@ lNI}QYtcLr׫_;JΒ8tRa(YXN& ~ "/> !?- `2s?Os: AZm\ߙ؈J3U㢸8>EGO^R AlU<8!ޔkOpԛ53P~1\͹̓bҬyNPFXX2dCMsm 1ø`as}}u,zk'k7\o; H EI?^Lm;t`B(ZbQhJXX.t\@Be&2*xG1D򒝳KZ/=g 2bqp "  gEft) FV\(X*w6ĆLwMʄ!r0jҲb5W4N{5S161ƞDzlą{~.\'GB':;j>um/l吕K,v9> y&j+IKѠis1( LsN\2JZcެ\4mV~%窽j  $}2Egq}U#⃕'=J3|NN?=9r+S|*ʇغ+vv^ߗصEW4ҍfۺ9#q$GVzpF[*zMXjK-x % 1"˰+v)Y`Ŭ^AJN";JHOZ1dY|.t 9 wZ)ZT +'$h9RږBSF$@җs*k&6 !")@C ɋ(2\%JG夐5 U V𢨤4*  L e_IYI o]C}BN F/`j^\ҥ\V#V`Q.}IE 9< hm!4Fuáǖw؞YQ?m 7٦FzR-[EU# Q~>t*F֤b]oחMtK7wf?}w_Eഫ~97h]UyFKkgi'7_S%̻ ~畘z~(8pwng\idr okp1 RnO)\EmW 4;E]MwSδ0oD#P1M@$H15D#HT'H1u5)F,F Q=๑bn vTRʼ|G {@*k*Q (F+.JȂS<8a\-f>n@ 4+>{Ehp_/7;&Inңf3 i—8Tr Uآ0hNL7 ॰ZŽ/3p *Z4(Z4x!jN\ˇؑ15 ǝcV*?Y.zz\{e-|:X/H,Drq\\K8a_>B!KL#J799hCmW G<_7YŔL ~;q|5lH͘o@o|>Htմq$icPR&QMZ$ii`U\M+OPyJdR3H%j# iuM,.,-/_i\'4s_ g_q66Wggoy :Tš=ZeT4kq%I#Y4[s|-׎2;la9r##Eå"ZL![jY bPS qk5,()|xʗz;EZuPXaXyvܐ*6/~JN&ؤM`c$74>A2ihQMد N?bI$ %-Qmӝ+yF+pZ]tl߮fxcw;kq+avCPe5Xg^1GگYE)7V:/:R}Z/' #58/vV`/Nyz{ro_|H2;f[fWXЌhᵦ<55cO$ {cdI@3xA-:9kIyb^ ʎVj3]#2M[ˊKdЂPJw*mExmpVpPZK\a) L~ٟnhI!u)b:Y`!NCu\TCFG-5˰ ` !׫K˕k(sN:]QQ ^)% $+MYٟ8eRUmԜG.Fcx9=]0ۛ*!A?k#Aww-v1ş}? 7:reP[x};y/>E_߼uͯ_$L@u4:ݷGjD{jrV4BԶm}lu8΢x]A ݆ u>#Fהtm  ;5&KA A }Ft{M+B_L ~P6Y7O)߉xg|6Ȥgo]gy7}jՅ,N+.N]_ٳգϼ?!(xzgLބKךҟVŢ80䳡vPMqw&P TT[cxt)[㮹`lq]JwJvB$jv{Jc WDtf'"H?qyݑGOġvp{qI8r 52ȣ'p~IV֧3cPW8kRΧJH:u#mѐZŢ͢+H(zGn!OK09ԓT'|U/0x8Ԍ$ DpD-:x<^ /Y|`S.Q2*T(&egJv Q, 4G@+(va" pZQQT|(>r*i\!'1!5<Ё8Ԕ$ġ$G:!MRVy@::o(k|#SvmRk(/--\eTxp([4 J0_ߢv QyѠv x x )w xjt xj!QPrilU6B.dU6YͪlW@ɧ|T~SO.z K-&D-Zj"ZHԚ'Q5 :עZ\kѺBDvIjnї|n-P(?}|$"տNfv#IV(?|QnH@nO3jJkeY[\FUWdQM6h2g(}a^{~iS{[}DחG%١NJ Z+qYDUڂ:ò{qi8[h+1)1Xв. 
.5D²Z1hTf  5wBZ9"Pl]5UyS=LɣTN%(%T f*I*7D`M@c3boHJ$`:W 2\ (\躬8jǔ*-Dծ(8a#G H}`PHTշ;UW@.+*, 1B!z;ڷ$ln/0L0O?`_IW(_X2 'ٯ rAv9&Ԝ} 3~Q7NMt_$Fy1Zt gyD`JS!ra{r"!&]IB3S`pg}0̤&!| SNvB6YD$#t~q{{Gq6jEo7D`&XeQ0FNjDNjaҨ(9LУ-|e8XeXEO<lݵ,O/f@- ԚEDZASWT|bӍI8h Go4fШGo,jHaԗ;5ɵˇXC|*(E-Jd0d05~JAm%d0 !"EhFF6J"Q_nŒΑ}-r S/SFncQkơFD"Q_VmmtVC~V&9K}tgѾ85>8 1}}>(dk!$Q K}R8uA->>":PP8u(C/Q+8_}4!ݙC_|+[!JD-i-UQp!@C]H)9,8 -U*)!$0;[%\`>j({z,ȷVILUXF:Ics

6AUѫ8FQ@Q@}-j4s*GrGf&? NCMIvJ$jJS"Qol\$NvJS_)ɓCEq{יڗs6arfGș9+DX`M4cMIdvDFS$+[Ԇّ3;^,C؞иtP+F1Jj7`dw!ԗ[Xew~vgw~w>ox)HpYġ7鲈Aƿޕq$Bl>k.رk^f "&&:bՇʪbNÐ /##"ʈQ,2Qw-8dZZ;Դ|,JȢ,J")R:2QG{sݙ%E;5z+v\7c-KKCdK'p~כPk߇jI pyˊ-4yH9mF-w9%_%![7|gtG,Ps6ֹC-EĂ1(( -'IS9P6Twjw1 F"[vءfbIuTwIu7|H՗> 6zPLGqfzlClG9W+X9ĺb~uw_6m&j".)gXdnsQ34KQ=D̈́(ے-ےDhKW "s"6--)ے}-iFIb=7c=߽ys2٬.~9n^pyls}4~W/=JV.f NpFJX" N3ܐV32ӊy*NG:B`R6fKk`z9,}HOEs;[EPRio^~l6"Z[7ߒu8oa%9o$t4Rr_̷)&/',B;PΞؙ=9a>sl盄Z䇰;yb&iZc24©ʘT۟|3}*ygC?lcp7]˫ D!?)Rq~7s9br~rx71gn##l Hjur`HgnXY1`@6jy9<~Izu_nM\]\-ŽWq4W^^m׫8 z o3Nr)2aDTL#]i)c<АS?ZJxͲoM6OOzf1it:ԌԮ M= ?1mv+qTʋ&a&-E Bh/tt[4r2_ԯv=(pKI9fxW L|[Ϣm(Bz塽_Ն}4j+ ^Ǹ^@H!v1Za~3ǩnju_DJ$FQZ+z#/^aQJWRR(PS-Fq25@s1<ԂQ\C-Ee`jE(*l հrŶ\m6K8ePP-5- >eX#^h>:#iWD:T~ˤM3HqM$h޸qyD@"FnKuYKA[ktDyM%yr>7ʭrj1,A}*a ~u Q]<_& ߩAT!R3rz 6#_狤l "*ff/{jf{?]]\vz;xʘdj%=Y xz O&fryy|9<yN XTĵ&߯KxVZSJ b1yl|A3 tdݏzC'8T-yn)M, =D4[kԴD(7&mtT)p8le^y8i+RA]]v09ADª1W2Ӏ*blor;Xk.LHy!d|i$㿶OJƎ=Q Ijm~uu۩Ps/jZ{鏜L.kF\4[T@ʀ~y栓P%òHmt;\+v=O |$a~1ypkX[MG9E, Mtc|g|y:W%0*!sE ,pGnzgK?DHXHr<$Jjy.<Ԥkrpt>gQX%HZ%HCuMFQYZQTeh$uTmp ,D*qll"'Φ\VJ_G.59qPݥ=Hty3ߛ~FvppLH]]g-5KgX))z>X$>N%㈑z#BPG77cAݙڤxnYN5jF.=`mbvZQkֲD >,ԴY-XjQJQ3IpƧ#Z.Ul^Q/1Zfw@&.4>o'>;MZ[iOֿ'JQZZl7؁cyHfH#Z:Y;K/KKg-ǡ끹Z$Z#=b&jF\BGpCp)-\,\-r$r%y9G<ԂQ#"wKhOSJbKIJbyˉ0c.jfFŒQ=D%XŒ%XŒ/#̘SwL>7Zo "B1*+X"0FZ0%X`A qSL2$k7I@ CHyVp2zF8LmҀ` vI00 ~+hj5,~Qs [a xC$>1"FA}όFZj$@ab1 H;,E"$ ~^QR([VQ;`]Kj,;`j:Dۣ>{ZJ^JK q)!.!?2q8%j1e)}wZ_k䛁}K$7Hm!ӏf} /[lNцۚqAfƹVXPk(S.6ڰ%1$Z;PQY]~htz;ϨTtmh)d@[b il7W)P`!W/\/ \@RnKF_G)Fĥ8%&U3Sa)MJgdS.J @k}daԞ Y^<(Ju^PP)-jV*#z ) kU(I,c-FEk6 "#$(ts5C 7*4xW?%z'Pt7# 0xܠミ q8CY0ag&:\UeaI-bksTzuq,9-P^0TXQɕW+W`}V6a{/3Dk–|\Μ6^ j+ㅩQDa@8NhQX,C󃝇—v {7[Gf\V㯟3S*V,^)l421z&C5*Wxd? 1*BV*s2=vf=7 vYn)|ޓO\d3^E_ib%Sc=m >϶fsq!rl*c?TӴvV`X;.~f ;{nHC~9D[ 2u1 nA#+9y|zVU~}ջݶӌ)DJ9&8t:T$3,3n-"acZ&4JY PJhi*AOl63o\83:#PYm9fÕDD-%A얷pqiöA3cy)h![qءdi!١$Ij$}2QqD(|}2QwnS=x}ː>eOS>e#KO ~b`fg@TuW[)J&8"}OԢeHF/$v0jEe-Ө]g&dnH"7TJZ--i )nEc/_n?Tqw& fq>-_o7'=_g_FzFcUد=W_7\N& Kx;_{|9gv>ZWش^v?cT.} =7N rw>LZQ%8p6pkh:|5~\ݡښpBYRC#ʹ$-IزoOzNvf|ۋH_*e[1Ί[J`UFZ->߲$9PX]ٿt_hvv5N'8-Kt> Rn¡-Ҟc)7_Pͤn L8/nFW㥷lrԭHh ?jgk"d(:z%m o񾜍,!TˮCr"R,PS?ԟN)RE#iTƋ'RUVd'Qm0/#HEDidr*/0 2a mBF LA.rtr9NR{R!޳Ƣn0h嶪)W3.AvVu]C T9YfyWͪ9IFI!Ÿ&EϦzt?ǿ|C$H}CSeֵH%#kXk` z~ K#\oiZ~^ ! 5W}\Ք7F`dm( ŠMS}K_^>uΟ"nՇ695æQHTEQͻz2$jZ ngۉۡCK*' y&H%8uƍ֖3dk5TqU 0 }+JPq ֕+ݖAk ZU.'^q[WZPC2Z)8W(V`WגJ+Iuٖ|%7 rzB^(+8WbSհ$X*RUlK X<ǸՒu?eQBQ- BH8P FЃt^;0h v6㬽>T =/z ؿzosrIV-Z o\̸ߪu턔;W'I/}{0Vz+ .: ?8@5.H LZK!I)!Jmp4&Q-(јFe8VԔ^HZ2 N6TK8B;(B;(6r 4ZYXղ,,jiQ}O5|,lh64 ,E<`-_ 99ЪOI"<%TwM)It:k~fN"ժ !wh?wNf`/_:xձV,26m9ETY&Qm'ETYQ(-ʒE~TB*ˡrPe# N6^ yfҨ֔Q7#͝ ӡnf9] 'bW&s,1jٵ[Xb՝%e%}[bC,q%v%8\MAhVDp%jӹJ?op%B:ܦPM`EtDʽDeFˊ7BԌkdr+&Q x-5fbA+f0px>4 .F5wix\Rc#,z]+2=yH7t4;ɜ.Feˋ զ=dHttRhOpkPgZЛRX"ހbH m&y @?BZqݔT_UQ`!D[] ܪ8.v)+P-Ev$RݹH8oaG՜" ;ŽTFz3v1?[5f CaP1vszm\7]'gsO6"~/fXyC瞜7HE N:96ʔ50{TX >y'sSg"f*10`"Նw 5 JuAYÀޣ€Cp \8@' HSgNR(%LUf)pbjTZ [P*$hx8Z~y=%a0 ue5n^ |ːsB5>%#C׵Uiyy 2Yyc[l콥7`op]tL}OFUJ a>vXX "F"UͬǢ<=K3QCZAs4K '*č'\7Rɋ7"]HeHZ*R@G*ժkD *O )Naf[B VH%h7 -V'Va[mQӣ(y>?`l~{jvt(LSDQ$` VFWԮqִv JU8BL"iTޒ}{QEKٕK"bp]#t۷~p~wl3QMm<)HW PV[ bCo먴% ԈؿaZiǬ5Ս>OD8U5Ms4a;k|0l1Z+aݩVE2FuN{NZU|>F >{;)PQJWRS`@gU thmTH "i߆q"8%qZ*QP̨q8ztPzZr ֖RAZS^abާZ=\ [7+_-#zvko<"o]׿eFG|Lb3S"Q _uViTӮdTSQxD,b.AR-ZHxa0^wR)ԅ$,(kwϑ VnE[XAG&WiE5T {Owmqr Uz98^6.HlF3\l9,$DHkD?4 x$Ia? 
var/home/core/zuul-output/logs/kubelet.log
Jan 31 16:29:10 crc systemd[1]: Starting Kubernetes Kubelet...
Jan 31 16:29:10 crc restorecon[4692]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963 Jan 31 16:29:10 crc restorecon[4692]: 
/var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726 Jan 31 16:29:10 crc restorecon[4692]: 
/var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c574,c582 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 31 
16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c440,c975 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 31 16:29:10 crc restorecon[4692]: 
/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c4,c22 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 31 16:29:10 crc 
restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 31 16:29:10 crc restorecon[4692]: 
/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 31 16:29:10 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 
Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c968,c969 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 
31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 31 16:29:11 
crc restorecon[4692]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 31 16:29:11 
crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized 
by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c377,c642 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c0,c25 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c336,c787 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 
16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 31 16:29:11 crc 
restorecon[4692]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 
16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 
16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc 
restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 31 16:29:11 crc restorecon[4692]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 31 16:29:11 crc restorecon[4692]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 31 16:29:11 crc restorecon[4692]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0 Jan 31 16:29:12 crc kubenswrapper[4769]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Jan 31 16:29:12 crc kubenswrapper[4769]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version. Jan 31 16:29:12 crc kubenswrapper[4769]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Jan 31 16:29:12 crc kubenswrapper[4769]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. 
Jan 31 16:29:12 crc kubenswrapper[4769]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI. Jan 31 16:29:12 crc kubenswrapper[4769]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.406386 4769 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime" Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418127 4769 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418160 4769 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418170 4769 feature_gate.go:330] unrecognized feature gate: PlatformOperators Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418182 4769 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418191 4769 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418202 4769 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418214 4769 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418226 4769 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418247 4769 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418257 4769 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418265 4769 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418274 4769 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418282 4769 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418292 4769 feature_gate.go:330] unrecognized feature gate: NewOLM Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418300 4769 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418309 4769 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418318 4769 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418326 4769 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418335 4769 feature_gate.go:330] unrecognized feature gate: SignatureStores Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418345 4769 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418353 4769 feature_gate.go:330] unrecognized feature gate: 
MachineAPIOperatorDisableMachineHealthCheckController Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418362 4769 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418372 4769 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418380 4769 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418389 4769 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418398 4769 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418406 4769 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418419 4769 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418430 4769 feature_gate.go:330] unrecognized feature gate: GatewayAPI Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418443 4769 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418465 4769 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418477 4769 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418487 4769 feature_gate.go:330] unrecognized feature gate: OVNObservability Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418503 4769 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418513 4769 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418523 4769 feature_gate.go:330] unrecognized feature gate: InsightsConfig Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418557 4769 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418567 4769 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418577 4769 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418586 4769 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418595 4769 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418604 4769 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418612 4769 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418621 4769 feature_gate.go:330] unrecognized feature gate: PinnedImages Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418629 4769 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418639 4769 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 
16:29:12.418648 4769 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418657 4769 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418665 4769 feature_gate.go:330] unrecognized feature gate: Example Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418674 4769 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418682 4769 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418691 4769 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418702 4769 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418712 4769 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418720 4769 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418735 4769 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418744 4769 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418755 4769 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418763 4769 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418772 4769 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418780 4769 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418789 4769 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418798 4769 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418806 4769 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418819 4769 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418830 4769 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418852 4769 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418861 4769 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418871 4769 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418880 4769 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.418888 4769 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419083 4769 flags.go:64] FLAG: --address="0.0.0.0" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419103 4769 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419127 4769 flags.go:64] FLAG: --anonymous-auth="true" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419141 4769 flags.go:64] FLAG: --application-metrics-count-limit="100" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419154 4769 flags.go:64] FLAG: --authentication-token-webhook="false" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419164 4769 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419177 4769 flags.go:64] FLAG: --authorization-mode="AlwaysAllow" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419190 4769 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419200 4769 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419210 4769 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419221 4769 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419232 4769 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419242 4769 flags.go:64] FLAG: --cgroup-driver="cgroupfs" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419252 4769 flags.go:64] FLAG: --cgroup-root="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419261 4769 flags.go:64] FLAG: --cgroups-per-qos="true" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419271 4769 flags.go:64] FLAG: --client-ca-file="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419281 4769 flags.go:64] FLAG: --cloud-config="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419291 4769 flags.go:64] FLAG: --cloud-provider="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419302 4769 flags.go:64] FLAG: --cluster-dns="[]" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419320 4769 flags.go:64] FLAG: --cluster-domain="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419330 4769 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419340 4769 flags.go:64] FLAG: --config-dir="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419350 4769 flags.go:64] FLAG: 
--container-hints="/etc/cadvisor/container_hints.json" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419361 4769 flags.go:64] FLAG: --container-log-max-files="5" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419374 4769 flags.go:64] FLAG: --container-log-max-size="10Mi" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419384 4769 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419394 4769 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419404 4769 flags.go:64] FLAG: --containerd-namespace="k8s.io" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419414 4769 flags.go:64] FLAG: --contention-profiling="false" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419424 4769 flags.go:64] FLAG: --cpu-cfs-quota="true" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419435 4769 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419458 4769 flags.go:64] FLAG: --cpu-manager-policy="none" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419469 4769 flags.go:64] FLAG: --cpu-manager-policy-options="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419482 4769 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419498 4769 flags.go:64] FLAG: --enable-controller-attach-detach="true" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419508 4769 flags.go:64] FLAG: --enable-debugging-handlers="true" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419518 4769 flags.go:64] FLAG: --enable-load-reader="false" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419528 4769 flags.go:64] FLAG: --enable-server="true" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419568 4769 flags.go:64] FLAG: --enforce-node-allocatable="[pods]" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419598 4769 flags.go:64] FLAG: --event-burst="100" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419609 4769 flags.go:64] FLAG: --event-qps="50" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419619 4769 flags.go:64] FLAG: --event-storage-age-limit="default=0" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419629 4769 flags.go:64] FLAG: --event-storage-event-limit="default=0" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419639 4769 flags.go:64] FLAG: --eviction-hard="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419651 4769 flags.go:64] FLAG: --eviction-max-pod-grace-period="0" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419660 4769 flags.go:64] FLAG: --eviction-minimum-reclaim="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419670 4769 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419681 4769 flags.go:64] FLAG: --eviction-soft="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419690 4769 flags.go:64] FLAG: --eviction-soft-grace-period="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419700 4769 flags.go:64] FLAG: --exit-on-lock-contention="false" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419710 4769 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419720 4769 flags.go:64] FLAG: --experimental-mounter-path="" Jan 31 16:29:12 crc 
kubenswrapper[4769]: I0131 16:29:12.419730 4769 flags.go:64] FLAG: --fail-cgroupv1="false" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419741 4769 flags.go:64] FLAG: --fail-swap-on="true" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419750 4769 flags.go:64] FLAG: --feature-gates="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419762 4769 flags.go:64] FLAG: --file-check-frequency="20s" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419772 4769 flags.go:64] FLAG: --global-housekeeping-interval="1m0s" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419783 4769 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419793 4769 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419803 4769 flags.go:64] FLAG: --healthz-port="10248" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419813 4769 flags.go:64] FLAG: --help="false" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419822 4769 flags.go:64] FLAG: --hostname-override="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419832 4769 flags.go:64] FLAG: --housekeeping-interval="10s" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419842 4769 flags.go:64] FLAG: --http-check-frequency="20s" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419852 4769 flags.go:64] FLAG: --image-credential-provider-bin-dir="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419861 4769 flags.go:64] FLAG: --image-credential-provider-config="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419870 4769 flags.go:64] FLAG: --image-gc-high-threshold="85" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419898 4769 flags.go:64] FLAG: --image-gc-low-threshold="80" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419908 4769 flags.go:64] FLAG: --image-service-endpoint="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419918 4769 flags.go:64] FLAG: --kernel-memcg-notification="false" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419928 4769 flags.go:64] FLAG: --kube-api-burst="100" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419938 4769 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419948 4769 flags.go:64] FLAG: --kube-api-qps="50" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419958 4769 flags.go:64] FLAG: --kube-reserved="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419968 4769 flags.go:64] FLAG: --kube-reserved-cgroup="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419977 4769 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419987 4769 flags.go:64] FLAG: --kubelet-cgroups="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.419996 4769 flags.go:64] FLAG: --local-storage-capacity-isolation="true" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420006 4769 flags.go:64] FLAG: --lock-file="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420016 4769 flags.go:64] FLAG: --log-cadvisor-usage="false" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420026 4769 flags.go:64] FLAG: --log-flush-frequency="5s" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420036 4769 flags.go:64] FLAG: --log-json-info-buffer-size="0" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420052 4769 flags.go:64] 
FLAG: --log-json-split-stream="false" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420062 4769 flags.go:64] FLAG: --log-text-info-buffer-size="0" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420073 4769 flags.go:64] FLAG: --log-text-split-stream="false" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420082 4769 flags.go:64] FLAG: --logging-format="text" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420093 4769 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420104 4769 flags.go:64] FLAG: --make-iptables-util-chains="true" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420113 4769 flags.go:64] FLAG: --manifest-url="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420123 4769 flags.go:64] FLAG: --manifest-url-header="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420136 4769 flags.go:64] FLAG: --max-housekeeping-interval="15s" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420146 4769 flags.go:64] FLAG: --max-open-files="1000000" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420159 4769 flags.go:64] FLAG: --max-pods="110" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420169 4769 flags.go:64] FLAG: --maximum-dead-containers="-1" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420179 4769 flags.go:64] FLAG: --maximum-dead-containers-per-container="1" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420189 4769 flags.go:64] FLAG: --memory-manager-policy="None" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420199 4769 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420208 4769 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420218 4769 flags.go:64] FLAG: --node-ip="192.168.126.11" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420229 4769 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420251 4769 flags.go:64] FLAG: --node-status-max-images="50" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420261 4769 flags.go:64] FLAG: --node-status-update-frequency="10s" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420271 4769 flags.go:64] FLAG: --oom-score-adj="-999" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420293 4769 flags.go:64] FLAG: --pod-cidr="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420303 4769 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420318 4769 flags.go:64] FLAG: --pod-manifest-path="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420328 4769 flags.go:64] FLAG: --pod-max-pids="-1" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420337 4769 flags.go:64] FLAG: --pods-per-core="0" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420347 4769 flags.go:64] FLAG: --port="10250" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420357 4769 flags.go:64] FLAG: --protect-kernel-defaults="false" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420367 4769 flags.go:64] FLAG: --provider-id="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420377 4769 
flags.go:64] FLAG: --qos-reserved="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420386 4769 flags.go:64] FLAG: --read-only-port="10255" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420396 4769 flags.go:64] FLAG: --register-node="true" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420406 4769 flags.go:64] FLAG: --register-schedulable="true" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420417 4769 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420435 4769 flags.go:64] FLAG: --registry-burst="10" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420446 4769 flags.go:64] FLAG: --registry-qps="5" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420457 4769 flags.go:64] FLAG: --reserved-cpus="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420467 4769 flags.go:64] FLAG: --reserved-memory="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420491 4769 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420501 4769 flags.go:64] FLAG: --root-dir="/var/lib/kubelet" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420559 4769 flags.go:64] FLAG: --rotate-certificates="false" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420569 4769 flags.go:64] FLAG: --rotate-server-certificates="false" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420579 4769 flags.go:64] FLAG: --runonce="false" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420589 4769 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420599 4769 flags.go:64] FLAG: --runtime-request-timeout="2m0s" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420610 4769 flags.go:64] FLAG: --seccomp-default="false" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420619 4769 flags.go:64] FLAG: --serialize-image-pulls="true" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420629 4769 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420639 4769 flags.go:64] FLAG: --storage-driver-db="cadvisor" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420649 4769 flags.go:64] FLAG: --storage-driver-host="localhost:8086" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420659 4769 flags.go:64] FLAG: --storage-driver-password="root" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420669 4769 flags.go:64] FLAG: --storage-driver-secure="false" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420679 4769 flags.go:64] FLAG: --storage-driver-table="stats" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420688 4769 flags.go:64] FLAG: --storage-driver-user="root" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420698 4769 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420708 4769 flags.go:64] FLAG: --sync-frequency="1m0s" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420718 4769 flags.go:64] FLAG: --system-cgroups="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420741 4769 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420756 4769 flags.go:64] FLAG: --system-reserved-cgroup="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420766 
4769 flags.go:64] FLAG: --tls-cert-file="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420775 4769 flags.go:64] FLAG: --tls-cipher-suites="[]" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420792 4769 flags.go:64] FLAG: --tls-min-version="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420802 4769 flags.go:64] FLAG: --tls-private-key-file="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420811 4769 flags.go:64] FLAG: --topology-manager-policy="none" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420821 4769 flags.go:64] FLAG: --topology-manager-policy-options="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420837 4769 flags.go:64] FLAG: --topology-manager-scope="container" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420847 4769 flags.go:64] FLAG: --v="2" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420861 4769 flags.go:64] FLAG: --version="false" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420874 4769 flags.go:64] FLAG: --vmodule="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420886 4769 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.420896 4769 flags.go:64] FLAG: --volume-stats-agg-period="1m0s" Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421192 4769 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421203 4769 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421212 4769 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421225 4769 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421235 4769 feature_gate.go:330] unrecognized feature gate: GatewayAPI Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421244 4769 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421254 4769 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421263 4769 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421273 4769 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421283 4769 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421292 4769 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421304 4769 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
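The long run of FLAG: lines that ends above is the kubelet echoing every parsed command-line flag at verbosity 2 (flags.go:64); values such as "20s" and "1m0s" are ordinary Go duration strings. A minimal stand-alone sketch of that echo pattern with the standard flag package follows; it is illustrative only and is not the kubelet's actual flag-handling code.

package main

import (
	"flag"
	"log"
	"time"
)

func main() {
	// --file-check-frequency="20s" from the dump above: a Go duration flag.
	_ = flag.Duration("file-check-frequency", 20*time.Second,
		"how often to check config files for new data")
	flag.Parse()

	// Echo every registered flag, one line each, like the FLAG: lines above.
	flag.VisitAll(func(f *flag.Flag) {
		log.Printf("FLAG: --%s=%q", f.Name, f.Value.String())
	})
}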
Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421314 4769 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421324 4769 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421345 4769 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421354 4769 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421364 4769 feature_gate.go:330] unrecognized feature gate: SignatureStores Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421373 4769 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421383 4769 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421392 4769 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421401 4769 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421409 4769 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421432 4769 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421441 4769 feature_gate.go:330] unrecognized feature gate: OVNObservability Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421453 4769 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421463 4769 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421478 4769 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421488 4769 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421503 4769 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421513 4769 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421521 4769 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421559 4769 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
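The W "unrecognized feature gate" lines around here are warnings, not errors: the node is handed OpenShift's full cluster feature-gate list, and any name the kubelet's own gate registry does not know is logged and skipped, as the log itself shows. A hedged sketch of that parse-and-warn pattern, with a deliberately tiny stand-in for the known-gate set:

package main

import (
	"fmt"
	"strings"
)

// known stands in for the kubelet's registered gates; the real set is far
// larger and depends on the Kubernetes version.
var known = map[string]bool{"KMSv1": true, "NodeSwap": true}

func main() {
	spec := "KMSv1=true,NodeSwap=false,ManagedBootImages=true"
	gates := map[string]bool{}
	for _, kv := range strings.Split(spec, ",") {
		name, val, _ := strings.Cut(kv, "=")
		if !known[name] {
			fmt.Printf("W unrecognized feature gate: %s\n", name)
			continue
		}
		gates[name] = val == "true"
	}
	fmt.Printf("feature gates: %v\n", gates)
}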
Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421569 4769 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421578 4769 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421586 4769 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421596 4769 feature_gate.go:330] unrecognized feature gate: PinnedImages Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421606 4769 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421614 4769 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421623 4769 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421632 4769 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421642 4769 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421651 4769 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421660 4769 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421668 4769 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421676 4769 feature_gate.go:330] unrecognized feature gate: InsightsConfig Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421685 4769 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421694 4769 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421703 4769 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421711 4769 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421720 4769 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421728 4769 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421737 4769 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421745 4769 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421753 4769 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421762 4769 feature_gate.go:330] unrecognized feature gate: PlatformOperators Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421770 4769 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421779 4769 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421787 4769 feature_gate.go:330] 
unrecognized feature gate: InsightsOnDemandDataGather Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421815 4769 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421824 4769 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421833 4769 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421842 4769 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421850 4769 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421859 4769 feature_gate.go:330] unrecognized feature gate: Example Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421867 4769 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421876 4769 feature_gate.go:330] unrecognized feature gate: NewOLM Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421884 4769 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421893 4769 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421901 4769 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421910 4769 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.421918 4769 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.423218 4769 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.442272 4769 server.go:491] "Kubelet version" kubeletVersion="v1.31.5" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.442340 4769 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442491 4769 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442512 4769 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442522 4769 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442531 4769 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442568 4769 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442577 4769 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442585 4769 
feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442593 4769 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442602 4769 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442611 4769 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442618 4769 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442626 4769 feature_gate.go:330] unrecognized feature gate: OVNObservability Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442634 4769 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442644 4769 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442653 4769 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442661 4769 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442669 4769 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442678 4769 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442686 4769 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442695 4769 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442703 4769 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442711 4769 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442720 4769 feature_gate.go:330] unrecognized feature gate: SignatureStores Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442729 4769 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442740 4769 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
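Each warning run ends with a "feature gates: {map[...]}" summary whose keys come out in alphabetical order. That ordering is not something the kubelet does deliberately: Go's fmt package sorts map keys when formatting with %v, which is all the summary line is. For example:

package main

import "fmt"

func main() {
	// fmt sorts map keys when formatting with %v (Go 1.12+), so the output
	// is deterministic and alphabetical, like the summary lines in the log.
	gates := map[string]bool{"NodeSwap": false, "CloudDualStackNodeIPs": true, "KMSv1": true}
	fmt.Printf("feature gates: {%v}\n", gates)
}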
Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442754 4769 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442763 4769 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442772 4769 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442781 4769 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442791 4769 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442800 4769 feature_gate.go:330] unrecognized feature gate: GatewayAPI Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442810 4769 feature_gate.go:330] unrecognized feature gate: InsightsConfig Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442818 4769 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442826 4769 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442837 4769 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442848 4769 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442857 4769 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442867 4769 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442875 4769 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442885 4769 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442895 4769 feature_gate.go:330] unrecognized feature gate: PinnedImages Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442905 4769 feature_gate.go:330] unrecognized feature gate: NewOLM Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442914 4769 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442922 4769 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442931 4769 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442939 4769 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442947 4769 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442955 4769 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442963 4769 feature_gate.go:330] unrecognized feature gate: Example Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442973 4769 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 
16:29:12.442980 4769 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442988 4769 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.442996 4769 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443004 4769 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443011 4769 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443020 4769 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443028 4769 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443035 4769 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443046 4769 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443056 4769 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443066 4769 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443073 4769 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443081 4769 feature_gate.go:330] unrecognized feature gate: PlatformOperators Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443090 4769 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443098 4769 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443106 4769 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443113 4769 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443121 4769 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443128 4769 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443139 4769 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
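Back in the flag dump, --register-with-taints="node-role.kubernetes.io/master=:NoSchedule" uses the key[=value]:effect taint syntax, here with an empty value. A small sketch of splitting that string into its three parts; the taint struct below is only for illustration and is not the Kubernetes core/v1 Taint type:

package main

import (
	"fmt"
	"strings"
)

// taint is an illustrative type, not the Kubernetes API's core/v1.Taint.
type taint struct {
	Key, Value, Effect string
}

func parseTaint(s string) taint {
	kv, effect, _ := strings.Cut(s, ":") // split off the effect first
	k, v, _ := strings.Cut(kv, "=")      // empty values are legal
	return taint{Key: k, Value: v, Effect: effect}
}

func main() {
	fmt.Printf("%+v\n", parseTaint("node-role.kubernetes.io/master=:NoSchedule"))
}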
Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443149 4769 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.443163 4769 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443412 4769 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443427 4769 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443436 4769 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443445 4769 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443455 4769 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443466 4769 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443475 4769 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443484 4769 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443499 4769 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443508 4769 feature_gate.go:330] unrecognized feature gate: SignatureStores Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443516 4769 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443525 4769 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443555 4769 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443565 4769 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443574 4769 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443583 4769 feature_gate.go:330] unrecognized feature gate: PinnedImages Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443593 4769 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443600 4769 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443609 4769 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443617 4769 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Jan 31 16:29:12 crc 
kubenswrapper[4769]: W0131 16:29:12.443626 4769 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443635 4769 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443643 4769 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443651 4769 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443660 4769 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443667 4769 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443675 4769 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443683 4769 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443691 4769 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443699 4769 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443707 4769 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443715 4769 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443724 4769 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443731 4769 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443742 4769 feature_gate.go:330] unrecognized feature gate: PlatformOperators Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443750 4769 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443758 4769 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443783 4769 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
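Also from the flag dump: --tls-cipher-suites="[]" and an empty --tls-min-version mean no explicit TLS policy was given on the command line, in which case the Go TLS defaults apply (the effective policy can still come from the kubelet configuration file, which this log section does not show). For reference, the suites Go itself considers secure can be listed with the standard library:

package main

import (
	"crypto/tls"
	"fmt"
)

func main() {
	// tls.CipherSuites lists the cipher suites the Go TLS stack treats as
	// secure; this is Go's list, not necessarily what the kubelet serves.
	for _, cs := range tls.CipherSuites() {
		fmt.Println(cs.Name)
	}
}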
Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443793 4769 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443802 4769 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443811 4769 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443820 4769 feature_gate.go:330] unrecognized feature gate: InsightsConfig Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443828 4769 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443835 4769 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443843 4769 feature_gate.go:330] unrecognized feature gate: OVNObservability Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443852 4769 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443862 4769 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443871 4769 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443880 4769 feature_gate.go:330] unrecognized feature gate: NewOLM Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443888 4769 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443896 4769 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443904 4769 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443912 4769 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443920 4769 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443928 4769 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443936 4769 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443944 4769 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443952 4769 feature_gate.go:330] unrecognized feature gate: Example Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443960 4769 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443968 4769 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443976 4769 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443985 4769 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.443995 4769 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.444006 4769 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.444015 4769 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.444023 4769 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.444032 4769 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.444040 4769 feature_gate.go:330] unrecognized feature gate: GatewayAPI Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.444048 4769 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.444056 4769 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.444064 4769 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.444077 4769 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.444366 4769 server.go:940] "Client rotation is on, will bootstrap in background" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.452922 4769 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.453074 4769 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem". 
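The last three records above show client certificate rotation starting up: rotation is enabled, the existing kubeconfig is judged "still valid, no bootstrap necessary", and the combined cert/key PEM at /var/lib/kubelet/pki/kubelet-client-current.pem is loaded. A minimal validity check in the same spirit, using only the standard library (the path is taken from the log; the kubelet's real check lives in its bootstrap and certificate-manager code):

package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"log"
	"time"
)

func main() {
	const pemPath = "/var/lib/kubelet/pki/kubelet-client-current.pem" // path from the log
	// The current.pem file holds both the certificate and the private key,
	// so the same path is passed for both arguments.
	pair, err := tls.LoadX509KeyPair(pemPath, pemPath)
	if err != nil {
		log.Fatal(err)
	}
	leaf, err := x509.ParseCertificate(pair.Certificate[0])
	if err != nil {
		log.Fatal(err)
	}
	if time.Now().After(leaf.NotAfter) {
		fmt.Println("client certificate expired; bootstrap would be needed")
		return
	}
	fmt.Println("client certificate still valid until", leaf.NotAfter)
}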
Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.455073 4769 server.go:997] "Starting client certificate rotation" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.455119 4769 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.455341 4769 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-12-23 10:07:44.208733213 +0000 UTC Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.455589 4769 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.488264 4769 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.491854 4769 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Jan 31 16:29:12 crc kubenswrapper[4769]: E0131 16:29:12.492167 4769 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.22:6443: connect: connection refused" logger="UnhandledError" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.516657 4769 log.go:25] "Validated CRI v1 runtime API" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.561437 4769 log.go:25] "Validated CRI v1 image API" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.564376 4769 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.571379 4769 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2026-01-31-16-25-19-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3] Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.571541 4769 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}] Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.608759 4769 manager.go:217] Machine: {Timestamp:2026-01-31 16:29:12.605048569 +0000 UTC m=+0.679217288 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2799998 MemoryCapacity:33654128640 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:e3275d1e-5ae6-4e54-b0fa-71e35cbe4ac0 BootID:8a4704f7-ede0-4833-ba79-415de5d798cc Filesystems:[{Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827064320 Type:vfs Inodes:4108170 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 
Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365412864 Type:vfs Inodes:821634 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:4108170 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:c4:24:69 Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:c4:24:69 Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:33:0e:d7 Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:b9:02:63 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:91:81:52 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:34:94:46 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:9a:b5:12:91:2a:8c Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:d6:b4:94:9f:e3:a6 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654128640 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] 
Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None} Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.609263 4769 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available. Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.609698 4769 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:} Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.611318 4769 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.611722 4769 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[] Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.611781 4769 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2} Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.612162 4769 topology_manager.go:138] "Creating topology manager with none policy" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.612183 4769 
container_manager_linux.go:303] "Creating device plugin manager" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.612728 4769 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.612833 4769 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.613224 4769 state_mem.go:36] "Initialized new in-memory state store" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.613378 4769 server.go:1245] "Using root directory" path="/var/lib/kubelet" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.617149 4769 kubelet.go:418] "Attempting to sync node with API server" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.617189 4769 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.617223 4769 file.go:69] "Watching path" path="/etc/kubernetes/manifests" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.617283 4769 kubelet.go:324] "Adding apiserver pod source" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.617303 4769 apiserver.go:42] "Waiting for node sync before watching apiserver pods" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.622121 4769 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.623354 4769 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem". Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.623989 4769 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.22:6443: connect: connection refused Jan 31 16:29:12 crc kubenswrapper[4769]: E0131 16:29:12.624080 4769 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.22:6443: connect: connection refused" logger="UnhandledError" Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.624204 4769 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.22:6443: connect: connection refused Jan 31 16:29:12 crc kubenswrapper[4769]: E0131 16:29:12.624368 4769 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.22:6443: connect: connection refused" logger="UnhandledError" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.626750 4769 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.628450 4769 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume" Jan 31 16:29:12 crc 
kubenswrapper[4769]: I0131 16:29:12.628500 4769 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.628516 4769 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.628533 4769 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.628590 4769 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.628605 4769 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.628620 4769 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.628645 4769 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.628663 4769 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.628678 4769 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.628727 4769 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.628743 4769 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.630746 4769 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.632373 4769 server.go:1280] "Started kubelet" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.632583 4769 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.22:6443: connect: connection refused Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.633384 4769 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10 Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.633396 4769 server.go:163] "Starting to listen" address="0.0.0.0" port=10250 Jan 31 16:29:12 crc systemd[1]: Started Kubernetes Kubelet. 
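Just above, the podresources endpoint is rate limited at qps=100 with burstTokens=10 before the kubelet starts listening on 0.0.0.0:10250. The usual shape of such a limiter is a token bucket; the sketch below is a generic toy bucket with those two numbers plugged in, not the kubelet's implementation:

package main

import (
	"fmt"
	"time"
)

// bucket is a toy token bucket: capacity = burst, refilled at qps tokens/s.
type bucket struct{ tokens chan struct{} }

func newBucket(qps, burst int) *bucket {
	b := &bucket{tokens: make(chan struct{}, burst)}
	for i := 0; i < burst; i++ {
		b.tokens <- struct{}{}
	}
	go func() {
		t := time.NewTicker(time.Second / time.Duration(qps))
		for range t.C {
			select {
			case b.tokens <- struct{}{}: // refill one token
			default: // bucket already full
			}
		}
	}()
	return b
}

func (b *bucket) allow() bool {
	select {
	case <-b.tokens:
		return true
	default:
		return false
	}
}

func main() {
	b := newBucket(100, 10) // qps=100, burstTokens=10, as in the log
	granted := 0
	for i := 0; i < 30; i++ {
		if b.allow() {
			granted++
		}
	}
	fmt.Printf("granted %d of 30 immediate requests\n", granted)
}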
Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.634387 4769 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.637085 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.637155 4769 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.638237 4769 volume_manager.go:287] "The desired_state_of_world populator starts" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.638267 4769 volume_manager.go:289] "Starting Kubelet Volume Manager" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.638480 4769 desired_state_of_world_populator.go:146] "Desired state populator starts to run" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.637407 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-22 12:33:09.533905554 +0000 UTC Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.639566 4769 server.go:460] "Adding debug handlers to kubelet server" Jan 31 16:29:12 crc kubenswrapper[4769]: E0131 16:29:12.639181 4769 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.640721 4769 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.22:6443: connect: connection refused Jan 31 16:29:12 crc kubenswrapper[4769]: E0131 16:29:12.644986 4769 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.22:6443: connect: connection refused" logger="UnhandledError" Jan 31 16:29:12 crc kubenswrapper[4769]: E0131 16:29:12.645520 4769 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.22:6443: connect: connection refused" interval="200ms" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.646606 4769 factory.go:153] Registering CRI-O factory Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.646660 4769 factory.go:221] Registration of the crio container factory successfully Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.646812 4769 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.646842 4769 factory.go:55] Registering systemd factory Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.646856 4769 factory.go:221] Registration of the systemd container factory successfully Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.647584 4769 factory.go:103] Registering Raw factory Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.647663 4769 manager.go:1196] Started watching for new ooms in manager Jan 31 16:29:12 crc kubenswrapper[4769]: 
I0131 16:29:12.649258 4769 manager.go:319] Starting recovery of all containers Jan 31 16:29:12 crc kubenswrapper[4769]: E0131 16:29:12.654141 4769 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.22:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.188fddb611cd26b8 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-31 16:29:12.632338104 +0000 UTC m=+0.706506813,LastTimestamp:2026-01-31 16:29:12.632338104 +0000 UTC m=+0.706506813,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.662218 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.662295 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.662390 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.662425 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.662464 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.662485 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.662514 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.662573 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 
16:29:12.662609 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.662633 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.662656 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.662677 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.662705 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.662803 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.662836 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.662892 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.662919 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.662948 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.663039 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.663124 4769 reconstruct.go:130] 
"Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.663154 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.663174 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.663241 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.663305 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.663344 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.663447 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.663488 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.663518 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.663678 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.663701 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.663723 4769 reconstruct.go:130] "Volume is marked as 
uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.663751 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.663790 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.663818 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.663870 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.663909 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.663938 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.663969 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.664009 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.664038 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.664143 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.664171 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual 
state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.664198 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.664225 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.664251 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.664276 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.664303 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.664338 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.664400 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.664434 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.664483 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.664526 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.664588 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" 
pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.664614 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.664647 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.664670 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.664697 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.664734 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.664755 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.664783 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.664814 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.664842 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.664870 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.664901 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.664929 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.664949 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.664977 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.665005 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.665038 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.665066 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.665103 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.665138 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.665173 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.665198 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.665234 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.665272 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.665294 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.665313 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.665332 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.665367 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.665392 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.665418 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.665456 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.665492 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.665530 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.665583 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" 
volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.665605 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.665630 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.665651 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.665670 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.665689 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.665708 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.665727 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.665746 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.665765 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.665793 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.665811 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" 
volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.665838 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.665857 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.665877 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.665896 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.665919 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.665946 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.665971 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.666019 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.666044 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.666064 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.666083 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" 
volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.666108 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.666129 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.666156 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.666176 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.666198 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.666220 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.666250 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.666270 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.666290 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.666316 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.666336 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" 
volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.666366 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.666385 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.666412 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.666430 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.666450 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.666475 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.666501 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.666520 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.666621 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.666642 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.666662 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" 
volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.666681 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.666698 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.667328 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.667352 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.667372 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.667403 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.667429 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.667484 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.667506 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.667667 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.667708 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" 
volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.667742 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.667781 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.667816 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.667843 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.667871 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.667899 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.667920 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.667939 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.667957 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.667980 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.668000 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" 
volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.668027 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.668047 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.668067 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.668085 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.668105 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.668124 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.668144 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.668166 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.668189 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.668217 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.668238 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" 
volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.668257 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.668282 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.668300 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.668322 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.668341 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.668361 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.668378 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.668397 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.668416 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.668434 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.668460 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" 
volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.668479 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.668515 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.668562 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.668587 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.668615 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.668633 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.668665 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.668683 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.668703 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.668724 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.668746 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" 
volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.668765 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.668795 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.668815 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.668836 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.668854 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.668880 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.668919 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.669621 4769 manager.go:324] Recovery completed Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.670842 4769 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.670885 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.670907 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" 
Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.670932 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.670964 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.671086 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.671110 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.671151 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.671173 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.671246 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.671264 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.671281 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.671298 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.671337 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Jan 31 16:29:12 crc 
kubenswrapper[4769]: I0131 16:29:12.671356 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.671378 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.671398 4769 reconstruct.go:97] "Volume reconstruction finished" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.671411 4769 reconciler.go:26] "Reconciler: start to sync state" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.678958 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.686387 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.686460 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.686479 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.692240 4769 cpu_manager.go:225] "Starting CPU manager" policy="none" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.692275 4769 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.692359 4769 state_mem.go:36] "Initialized new in-memory state store" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.704687 4769 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.706777 4769 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.706840 4769 status_manager.go:217] "Starting to sync pod status with apiserver" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.706888 4769 kubelet.go:2335] "Starting kubelet main sync loop" Jan 31 16:29:12 crc kubenswrapper[4769]: E0131 16:29:12.706968 4769 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Jan 31 16:29:12 crc kubenswrapper[4769]: W0131 16:29:12.707737 4769 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.22:6443: connect: connection refused Jan 31 16:29:12 crc kubenswrapper[4769]: E0131 16:29:12.707901 4769 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.22:6443: connect: connection refused" logger="UnhandledError" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.711958 4769 policy_none.go:49] "None policy: Start" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.714782 4769 memory_manager.go:170] "Starting memorymanager" policy="None" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.714807 4769 state_mem.go:35] "Initializing new in-memory state store" Jan 31 16:29:12 crc kubenswrapper[4769]: E0131 16:29:12.740183 4769 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.792812 4769 manager.go:334] "Starting Device Plugin manager" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.793089 4769 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.793116 4769 server.go:79] "Starting device plugin registration server" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.793783 4769 eviction_manager.go:189] "Eviction manager: starting control loop" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.793811 4769 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.794504 4769 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.794695 4769 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.794716 4769 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Jan 31 16:29:12 crc kubenswrapper[4769]: E0131 16:29:12.802408 4769 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.807564 4769 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc"] Jan 31 16:29:12 crc kubenswrapper[4769]: 
I0131 16:29:12.807679 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.809306 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.809373 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.809397 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.809713 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.810087 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.810164 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.811121 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.811162 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.811183 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.811242 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.811283 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.811297 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.811488 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.811580 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.811734 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.813004 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.813057 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.813100 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.813315 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.813543 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.813602 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.813642 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.813679 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.813697 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.814730 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.814772 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.814784 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.814978 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.815135 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.815188 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.815875 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.815896 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.815909 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.815921 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.815960 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.815978 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.816259 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.816365 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.816536 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.816555 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.816570 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.817340 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.817376 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.817389 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:12 crc kubenswrapper[4769]: E0131 16:29:12.846670 4769 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.22:6443: connect: connection refused" interval="400ms" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.874220 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.874277 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.874310 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.874332 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.874388 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: 
\"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.874467 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.874667 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.874702 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.874726 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.874748 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.874781 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.874804 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.874825 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.874856 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.874988 4769 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.894617 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.895996 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.896033 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.896044 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.896071 4769 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 31 16:29:12 crc kubenswrapper[4769]: E0131 16:29:12.896605 4769 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.22:6443: connect: connection refused" node="crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.976442 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.976523 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.976568 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.976591 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.976608 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.976670 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " 
pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.976732 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.976753 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.976784 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.976870 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.976879 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.976916 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.976876 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.976822 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.976952 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.976804 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: 
\"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.976992 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.977112 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.977091 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.977156 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.977190 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.977264 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.977325 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.977353 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.977542 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.977562 4769 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.977615 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.977393 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.977480 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 31 16:29:12 crc kubenswrapper[4769]: I0131 16:29:12.977753 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 31 16:29:13 crc kubenswrapper[4769]: I0131 16:29:13.097107 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:13 crc kubenswrapper[4769]: I0131 16:29:13.099071 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:13 crc kubenswrapper[4769]: I0131 16:29:13.099194 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:13 crc kubenswrapper[4769]: I0131 16:29:13.099252 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:13 crc kubenswrapper[4769]: I0131 16:29:13.099337 4769 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 31 16:29:13 crc kubenswrapper[4769]: E0131 16:29:13.100585 4769 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.22:6443: connect: connection refused" node="crc" Jan 31 16:29:13 crc kubenswrapper[4769]: I0131 16:29:13.146815 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 31 16:29:13 crc kubenswrapper[4769]: I0131 16:29:13.160291 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Jan 31 16:29:13 crc kubenswrapper[4769]: I0131 16:29:13.180085 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 31 16:29:13 crc kubenswrapper[4769]: W0131 16:29:13.197183 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-597960d2d0b1ce23c11e0eb5a22762e5409b48c5ea79f66f40d74fd04dc656ae WatchSource:0}: Error finding container 597960d2d0b1ce23c11e0eb5a22762e5409b48c5ea79f66f40d74fd04dc656ae: Status 404 returned error can't find the container with id 597960d2d0b1ce23c11e0eb5a22762e5409b48c5ea79f66f40d74fd04dc656ae Jan 31 16:29:13 crc kubenswrapper[4769]: W0131 16:29:13.198442 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-26512b19cdee8eb12ea8d1196b6b89760e4041746ba8c4e35105f6f518962b28 WatchSource:0}: Error finding container 26512b19cdee8eb12ea8d1196b6b89760e4041746ba8c4e35105f6f518962b28: Status 404 returned error can't find the container with id 26512b19cdee8eb12ea8d1196b6b89760e4041746ba8c4e35105f6f518962b28 Jan 31 16:29:13 crc kubenswrapper[4769]: W0131 16:29:13.202705 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-3a56ffc284641d9c590a7ecb90b5a99595d9342d8039510b57aa28e81f307d86 WatchSource:0}: Error finding container 3a56ffc284641d9c590a7ecb90b5a99595d9342d8039510b57aa28e81f307d86: Status 404 returned error can't find the container with id 3a56ffc284641d9c590a7ecb90b5a99595d9342d8039510b57aa28e81f307d86 Jan 31 16:29:13 crc kubenswrapper[4769]: I0131 16:29:13.205067 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 31 16:29:13 crc kubenswrapper[4769]: I0131 16:29:13.214642 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 31 16:29:13 crc kubenswrapper[4769]: W0131 16:29:13.236668 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-7c7cf76d62f884fdf968b7cf651785a65be993ff1e932f10801e8e9285f95b00 WatchSource:0}: Error finding container 7c7cf76d62f884fdf968b7cf651785a65be993ff1e932f10801e8e9285f95b00: Status 404 returned error can't find the container with id 7c7cf76d62f884fdf968b7cf651785a65be993ff1e932f10801e8e9285f95b00 Jan 31 16:29:13 crc kubenswrapper[4769]: E0131 16:29:13.247622 4769 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.22:6443: connect: connection refused" interval="800ms" Jan 31 16:29:13 crc kubenswrapper[4769]: I0131 16:29:13.501318 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:13 crc kubenswrapper[4769]: I0131 16:29:13.503202 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:13 crc kubenswrapper[4769]: I0131 16:29:13.503251 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:13 crc kubenswrapper[4769]: I0131 16:29:13.503265 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:13 crc kubenswrapper[4769]: I0131 16:29:13.503297 4769 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 31 16:29:13 crc kubenswrapper[4769]: E0131 16:29:13.503869 4769 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.22:6443: connect: connection refused" node="crc" Jan 31 16:29:13 crc kubenswrapper[4769]: I0131 16:29:13.634030 4769 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.22:6443: connect: connection refused Jan 31 16:29:13 crc kubenswrapper[4769]: W0131 16:29:13.637648 4769 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.22:6443: connect: connection refused Jan 31 16:29:13 crc kubenswrapper[4769]: E0131 16:29:13.637774 4769 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.22:6443: connect: connection refused" logger="UnhandledError" Jan 31 16:29:13 crc kubenswrapper[4769]: I0131 16:29:13.639716 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-20 09:45:40.674098125 +0000 UTC Jan 31 16:29:13 crc kubenswrapper[4769]: I0131 16:29:13.712569 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"3a56ffc284641d9c590a7ecb90b5a99595d9342d8039510b57aa28e81f307d86"} Jan 31 16:29:13 crc kubenswrapper[4769]: I0131 16:29:13.714002 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"597960d2d0b1ce23c11e0eb5a22762e5409b48c5ea79f66f40d74fd04dc656ae"} Jan 31 16:29:13 crc kubenswrapper[4769]: I0131 16:29:13.719446 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"26512b19cdee8eb12ea8d1196b6b89760e4041746ba8c4e35105f6f518962b28"} Jan 31 16:29:13 crc kubenswrapper[4769]: I0131 16:29:13.721004 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"7c7cf76d62f884fdf968b7cf651785a65be993ff1e932f10801e8e9285f95b00"} Jan 31 16:29:13 crc kubenswrapper[4769]: I0131 16:29:13.722039 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"20ae18fa419ddcbabbc2035cea442c8bda627881a5fc05d303d62397f2cc0ba6"} Jan 31 16:29:14 crc kubenswrapper[4769]: W0131 16:29:14.034455 4769 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.22:6443: connect: connection refused Jan 31 16:29:14 crc kubenswrapper[4769]: E0131 16:29:14.034549 4769 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.22:6443: connect: connection refused" logger="UnhandledError" Jan 31 16:29:14 crc kubenswrapper[4769]: E0131 16:29:14.048670 4769 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.22:6443: connect: connection refused" interval="1.6s" Jan 31 16:29:14 crc kubenswrapper[4769]: W0131 16:29:14.131898 4769 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.22:6443: connect: connection refused Jan 31 16:29:14 crc kubenswrapper[4769]: E0131 16:29:14.131961 4769 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.22:6443: connect: connection refused" logger="UnhandledError" Jan 31 16:29:14 crc kubenswrapper[4769]: W0131 16:29:14.164849 4769 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.22:6443: connect: connection 
refused Jan 31 16:29:14 crc kubenswrapper[4769]: E0131 16:29:14.164958 4769 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.22:6443: connect: connection refused" logger="UnhandledError" Jan 31 16:29:14 crc kubenswrapper[4769]: I0131 16:29:14.304764 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:14 crc kubenswrapper[4769]: I0131 16:29:14.306298 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:14 crc kubenswrapper[4769]: I0131 16:29:14.306362 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:14 crc kubenswrapper[4769]: I0131 16:29:14.306382 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:14 crc kubenswrapper[4769]: I0131 16:29:14.306422 4769 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 31 16:29:14 crc kubenswrapper[4769]: E0131 16:29:14.306922 4769 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.22:6443: connect: connection refused" node="crc" Jan 31 16:29:14 crc kubenswrapper[4769]: I0131 16:29:14.585995 4769 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Jan 31 16:29:14 crc kubenswrapper[4769]: E0131 16:29:14.587374 4769 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.22:6443: connect: connection refused" logger="UnhandledError" Jan 31 16:29:14 crc kubenswrapper[4769]: I0131 16:29:14.634374 4769 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.22:6443: connect: connection refused Jan 31 16:29:14 crc kubenswrapper[4769]: I0131 16:29:14.640938 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-15 13:31:36.209118185 +0000 UTC Jan 31 16:29:14 crc kubenswrapper[4769]: I0131 16:29:14.727755 4769 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="7cff45082b65da86bcca88d363692fdcaffbae44f33b5ae53a09b5aea7eff212" exitCode=0 Jan 31 16:29:14 crc kubenswrapper[4769]: I0131 16:29:14.727852 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"7cff45082b65da86bcca88d363692fdcaffbae44f33b5ae53a09b5aea7eff212"} Jan 31 16:29:14 crc kubenswrapper[4769]: I0131 16:29:14.727998 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:14 crc kubenswrapper[4769]: I0131 16:29:14.729947 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 31 16:29:14 crc kubenswrapper[4769]: I0131 16:29:14.730025 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:14 crc kubenswrapper[4769]: I0131 16:29:14.730052 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:14 crc kubenswrapper[4769]: I0131 16:29:14.730826 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"a8f8d35bedc1d437cf45b14a416b9e5ed1610d7ecd30603f9a179406839220a5"} Jan 31 16:29:14 crc kubenswrapper[4769]: I0131 16:29:14.730894 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"4dd9c6bf8a15857982b68a045b8ca1d407a63ad3da192375596f26389d9983cc"} Jan 31 16:29:14 crc kubenswrapper[4769]: I0131 16:29:14.733146 4769 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a" exitCode=0 Jan 31 16:29:14 crc kubenswrapper[4769]: I0131 16:29:14.733235 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a"} Jan 31 16:29:14 crc kubenswrapper[4769]: I0131 16:29:14.733340 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:14 crc kubenswrapper[4769]: I0131 16:29:14.734564 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:14 crc kubenswrapper[4769]: I0131 16:29:14.734624 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:14 crc kubenswrapper[4769]: I0131 16:29:14.734642 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:14 crc kubenswrapper[4769]: I0131 16:29:14.736038 4769 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="60c6336ddc7116c2d4e7c6d7c9e95f19b1b4c3c752a291ab6cd798fcdb76d2ef" exitCode=0 Jan 31 16:29:14 crc kubenswrapper[4769]: I0131 16:29:14.736145 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"60c6336ddc7116c2d4e7c6d7c9e95f19b1b4c3c752a291ab6cd798fcdb76d2ef"} Jan 31 16:29:14 crc kubenswrapper[4769]: I0131 16:29:14.736180 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:14 crc kubenswrapper[4769]: I0131 16:29:14.737155 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:14 crc kubenswrapper[4769]: I0131 16:29:14.742094 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:14 crc kubenswrapper[4769]: I0131 16:29:14.742132 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:14 crc kubenswrapper[4769]: I0131 
16:29:14.742146 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:14 crc kubenswrapper[4769]: I0131 16:29:14.742973 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:14 crc kubenswrapper[4769]: I0131 16:29:14.743092 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:14 crc kubenswrapper[4769]: I0131 16:29:14.743121 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:14 crc kubenswrapper[4769]: I0131 16:29:14.744417 4769 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="ca9119b9d21d895d756b0210339f2b39516f7f4bd84d912e2dc63039f66b9ca8" exitCode=0 Jan 31 16:29:14 crc kubenswrapper[4769]: I0131 16:29:14.744469 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"ca9119b9d21d895d756b0210339f2b39516f7f4bd84d912e2dc63039f66b9ca8"} Jan 31 16:29:14 crc kubenswrapper[4769]: I0131 16:29:14.744509 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:14 crc kubenswrapper[4769]: I0131 16:29:14.745950 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:14 crc kubenswrapper[4769]: I0131 16:29:14.746047 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:14 crc kubenswrapper[4769]: I0131 16:29:14.746133 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:15 crc kubenswrapper[4769]: I0131 16:29:15.633306 4769 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.22:6443: connect: connection refused Jan 31 16:29:15 crc kubenswrapper[4769]: I0131 16:29:15.641626 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-14 06:33:53.169466214 +0000 UTC Jan 31 16:29:15 crc kubenswrapper[4769]: E0131 16:29:15.650259 4769 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.22:6443: connect: connection refused" interval="3.2s" Jan 31 16:29:15 crc kubenswrapper[4769]: I0131 16:29:15.754010 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef"} Jan 31 16:29:15 crc kubenswrapper[4769]: I0131 16:29:15.754058 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc"} Jan 31 16:29:15 crc kubenswrapper[4769]: I0131 16:29:15.754072 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf"} Jan 31 16:29:15 crc kubenswrapper[4769]: I0131 16:29:15.754083 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0"} Jan 31 16:29:15 crc kubenswrapper[4769]: I0131 16:29:15.756009 4769 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="502a1f0176c5e13455fc3ce0f6ca36385880f97689263220bc94494f4fb5e547" exitCode=0 Jan 31 16:29:15 crc kubenswrapper[4769]: I0131 16:29:15.756084 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"502a1f0176c5e13455fc3ce0f6ca36385880f97689263220bc94494f4fb5e547"} Jan 31 16:29:15 crc kubenswrapper[4769]: I0131 16:29:15.756177 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:15 crc kubenswrapper[4769]: I0131 16:29:15.757400 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:15 crc kubenswrapper[4769]: I0131 16:29:15.757435 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:15 crc kubenswrapper[4769]: I0131 16:29:15.757444 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:15 crc kubenswrapper[4769]: I0131 16:29:15.758789 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"a0acdb96d69965a7e3f18674c45f56b62ffa359a948133793971dd31fd8df1f1"} Jan 31 16:29:15 crc kubenswrapper[4769]: I0131 16:29:15.758865 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:15 crc kubenswrapper[4769]: I0131 16:29:15.760757 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:15 crc kubenswrapper[4769]: I0131 16:29:15.760781 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:15 crc kubenswrapper[4769]: I0131 16:29:15.760791 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:15 crc kubenswrapper[4769]: I0131 16:29:15.767133 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"a05b68fb33a04a94268458bbaa7f891e2d493dbe5a27d7e12ced0d561a70e938"} Jan 31 16:29:15 crc kubenswrapper[4769]: I0131 16:29:15.767177 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:15 crc kubenswrapper[4769]: I0131 16:29:15.767203 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" 
event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"7027bbe350b6afd5a1563f64215b882ff809f1474dcce7a15a843756b7595233"} Jan 31 16:29:15 crc kubenswrapper[4769]: I0131 16:29:15.767229 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"cc4c0f2131eeb3ea03f5572e41e6c9f86ef15f5d6b70880be88ae6219eadecab"} Jan 31 16:29:15 crc kubenswrapper[4769]: I0131 16:29:15.768837 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:15 crc kubenswrapper[4769]: I0131 16:29:15.768875 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:15 crc kubenswrapper[4769]: I0131 16:29:15.768891 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:15 crc kubenswrapper[4769]: I0131 16:29:15.773156 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"3f22b718ef3d2b16061b2c0fe48c8fe612b3e2b63baa2925fce7a1ad9552b090"} Jan 31 16:29:15 crc kubenswrapper[4769]: I0131 16:29:15.773265 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:15 crc kubenswrapper[4769]: I0131 16:29:15.773305 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"ef1712a54b482696918a3ddb7294c16ce3676cb56c2928b721be12bcd390085e"} Jan 31 16:29:15 crc kubenswrapper[4769]: I0131 16:29:15.774348 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:15 crc kubenswrapper[4769]: I0131 16:29:15.774378 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:15 crc kubenswrapper[4769]: I0131 16:29:15.774391 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:15 crc kubenswrapper[4769]: W0131 16:29:15.885688 4769 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.22:6443: connect: connection refused Jan 31 16:29:15 crc kubenswrapper[4769]: E0131 16:29:15.885794 4769 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.22:6443: connect: connection refused" logger="UnhandledError" Jan 31 16:29:15 crc kubenswrapper[4769]: W0131 16:29:15.886250 4769 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.22:6443: connect: connection refused Jan 31 16:29:15 crc kubenswrapper[4769]: E0131 16:29:15.886347 4769 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to 
watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.22:6443: connect: connection refused" logger="UnhandledError" Jan 31 16:29:15 crc kubenswrapper[4769]: I0131 16:29:15.907709 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:15 crc kubenswrapper[4769]: I0131 16:29:15.909185 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:15 crc kubenswrapper[4769]: I0131 16:29:15.909227 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:15 crc kubenswrapper[4769]: I0131 16:29:15.909243 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:15 crc kubenswrapper[4769]: I0131 16:29:15.909274 4769 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 31 16:29:15 crc kubenswrapper[4769]: E0131 16:29:15.909825 4769 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.22:6443: connect: connection refused" node="crc" Jan 31 16:29:16 crc kubenswrapper[4769]: I0131 16:29:16.642304 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-16 09:31:05.668224705 +0000 UTC Jan 31 16:29:16 crc kubenswrapper[4769]: I0131 16:29:16.778343 4769 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="e529241eda20b31671e3674411aacf20711d7b27804d7978b48bcacaadf8a25d" exitCode=0 Jan 31 16:29:16 crc kubenswrapper[4769]: I0131 16:29:16.778433 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"e529241eda20b31671e3674411aacf20711d7b27804d7978b48bcacaadf8a25d"} Jan 31 16:29:16 crc kubenswrapper[4769]: I0131 16:29:16.778565 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:16 crc kubenswrapper[4769]: I0131 16:29:16.779732 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:16 crc kubenswrapper[4769]: I0131 16:29:16.779801 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:16 crc kubenswrapper[4769]: I0131 16:29:16.779830 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:16 crc kubenswrapper[4769]: I0131 16:29:16.782917 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:16 crc kubenswrapper[4769]: I0131 16:29:16.782980 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:16 crc kubenswrapper[4769]: I0131 16:29:16.783043 4769 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 31 16:29:16 crc kubenswrapper[4769]: I0131 16:29:16.783079 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:16 crc kubenswrapper[4769]: I0131 16:29:16.783088 4769 kubelet_node_status.go:401] "Setting node annotation to 
enable volume controller attach/detach" Jan 31 16:29:16 crc kubenswrapper[4769]: I0131 16:29:16.782955 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0"} Jan 31 16:29:16 crc kubenswrapper[4769]: I0131 16:29:16.784874 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:16 crc kubenswrapper[4769]: I0131 16:29:16.784927 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:16 crc kubenswrapper[4769]: I0131 16:29:16.784898 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:16 crc kubenswrapper[4769]: I0131 16:29:16.784927 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:16 crc kubenswrapper[4769]: I0131 16:29:16.784971 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:16 crc kubenswrapper[4769]: I0131 16:29:16.784973 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:16 crc kubenswrapper[4769]: I0131 16:29:16.784995 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:16 crc kubenswrapper[4769]: I0131 16:29:16.785014 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:16 crc kubenswrapper[4769]: I0131 16:29:16.785032 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:16 crc kubenswrapper[4769]: I0131 16:29:16.784947 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:16 crc kubenswrapper[4769]: I0131 16:29:16.784972 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:16 crc kubenswrapper[4769]: I0131 16:29:16.785160 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:17 crc kubenswrapper[4769]: I0131 16:29:17.361851 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 31 16:29:17 crc kubenswrapper[4769]: I0131 16:29:17.643081 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-30 11:55:18.367094829 +0000 UTC Jan 31 16:29:17 crc kubenswrapper[4769]: I0131 16:29:17.794526 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"d282bca47e56b022765460bd4e2c34df8d15e19b52199a1cded767fada46ae2b"} Jan 31 16:29:17 crc kubenswrapper[4769]: I0131 16:29:17.794589 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"1fd57645c8ddb97e6fb290f81195428e2bfd6b3af321e260cd4007cadc0fc496"} Jan 31 16:29:17 crc kubenswrapper[4769]: I0131 16:29:17.794603 4769 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"0717f838e9dd04fe04218d3858edda9cd76359c43d888bef60f5c6d533d35ec1"} Jan 31 16:29:17 crc kubenswrapper[4769]: I0131 16:29:17.794617 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"52ce0326d494aa79957bf95a8d09e34edec45f9d6145f648b035aed46693afa0"} Jan 31 16:29:17 crc kubenswrapper[4769]: I0131 16:29:17.794614 4769 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 31 16:29:17 crc kubenswrapper[4769]: I0131 16:29:17.794704 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:17 crc kubenswrapper[4769]: I0131 16:29:17.795952 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:17 crc kubenswrapper[4769]: I0131 16:29:17.796003 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:17 crc kubenswrapper[4769]: I0131 16:29:17.796015 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:18 crc kubenswrapper[4769]: I0131 16:29:18.643884 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-21 21:33:17.693408915 +0000 UTC Jan 31 16:29:18 crc kubenswrapper[4769]: I0131 16:29:18.744709 4769 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Jan 31 16:29:18 crc kubenswrapper[4769]: I0131 16:29:18.803842 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"a52d14237bd46861c4f0ee3500a8cc3baf688f273b03262ada1439aaea53eb61"} Jan 31 16:29:18 crc kubenswrapper[4769]: I0131 16:29:18.803910 4769 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 31 16:29:18 crc kubenswrapper[4769]: I0131 16:29:18.803991 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:18 crc kubenswrapper[4769]: I0131 16:29:18.804008 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:18 crc kubenswrapper[4769]: I0131 16:29:18.805408 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:18 crc kubenswrapper[4769]: I0131 16:29:18.805462 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:18 crc kubenswrapper[4769]: I0131 16:29:18.805481 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:18 crc kubenswrapper[4769]: I0131 16:29:18.805637 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:18 crc kubenswrapper[4769]: I0131 16:29:18.805685 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:18 crc kubenswrapper[4769]: I0131 16:29:18.805703 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:19 crc kubenswrapper[4769]: 
I0131 16:29:19.113899 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:19 crc kubenswrapper[4769]: I0131 16:29:19.122319 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:19 crc kubenswrapper[4769]: I0131 16:29:19.122360 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:19 crc kubenswrapper[4769]: I0131 16:29:19.122369 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:19 crc kubenswrapper[4769]: I0131 16:29:19.122395 4769 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 31 16:29:19 crc kubenswrapper[4769]: I0131 16:29:19.355206 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 31 16:29:19 crc kubenswrapper[4769]: I0131 16:29:19.644086 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-09 16:19:23.315627274 +0000 UTC Jan 31 16:29:19 crc kubenswrapper[4769]: I0131 16:29:19.664453 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 31 16:29:19 crc kubenswrapper[4769]: I0131 16:29:19.664763 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:19 crc kubenswrapper[4769]: I0131 16:29:19.666484 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:19 crc kubenswrapper[4769]: I0131 16:29:19.666569 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:19 crc kubenswrapper[4769]: I0131 16:29:19.666587 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:19 crc kubenswrapper[4769]: I0131 16:29:19.806415 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:19 crc kubenswrapper[4769]: I0131 16:29:19.806483 4769 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 31 16:29:19 crc kubenswrapper[4769]: I0131 16:29:19.807197 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:19 crc kubenswrapper[4769]: I0131 16:29:19.808790 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:19 crc kubenswrapper[4769]: I0131 16:29:19.808861 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:19 crc kubenswrapper[4769]: I0131 16:29:19.808886 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:19 crc kubenswrapper[4769]: I0131 16:29:19.809117 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:19 crc kubenswrapper[4769]: I0131 16:29:19.809159 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:19 crc kubenswrapper[4769]: I0131 16:29:19.809177 4769 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 31 16:29:20 crc kubenswrapper[4769]: I0131 16:29:20.447141 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Jan 31 16:29:20 crc kubenswrapper[4769]: I0131 16:29:20.608683 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Jan 31 16:29:20 crc kubenswrapper[4769]: I0131 16:29:20.644719 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-06 14:44:45.74032259 +0000 UTC Jan 31 16:29:20 crc kubenswrapper[4769]: I0131 16:29:20.644803 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 31 16:29:20 crc kubenswrapper[4769]: I0131 16:29:20.644970 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:20 crc kubenswrapper[4769]: I0131 16:29:20.646586 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:20 crc kubenswrapper[4769]: I0131 16:29:20.646663 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:20 crc kubenswrapper[4769]: I0131 16:29:20.646690 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:20 crc kubenswrapper[4769]: I0131 16:29:20.808791 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:20 crc kubenswrapper[4769]: I0131 16:29:20.809594 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:20 crc kubenswrapper[4769]: I0131 16:29:20.809626 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:20 crc kubenswrapper[4769]: I0131 16:29:20.809637 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:21 crc kubenswrapper[4769]: I0131 16:29:21.644328 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 31 16:29:21 crc kubenswrapper[4769]: I0131 16:29:21.644706 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:21 crc kubenswrapper[4769]: I0131 16:29:21.644846 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-03 04:24:18.604930487 +0000 UTC Jan 31 16:29:21 crc kubenswrapper[4769]: I0131 16:29:21.646252 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:21 crc kubenswrapper[4769]: I0131 16:29:21.646321 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:21 crc kubenswrapper[4769]: I0131 16:29:21.646348 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:21 crc kubenswrapper[4769]: I0131 16:29:21.813852 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:21 crc kubenswrapper[4769]: I0131 16:29:21.817115 4769 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:21 crc kubenswrapper[4769]: I0131 16:29:21.817184 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:21 crc kubenswrapper[4769]: I0131 16:29:21.817206 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:22 crc kubenswrapper[4769]: I0131 16:29:22.645081 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-09 10:49:19.243951906 +0000 UTC Jan 31 16:29:22 crc kubenswrapper[4769]: E0131 16:29:22.802576 4769 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Jan 31 16:29:22 crc kubenswrapper[4769]: I0131 16:29:22.984041 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 31 16:29:22 crc kubenswrapper[4769]: I0131 16:29:22.984356 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:22 crc kubenswrapper[4769]: I0131 16:29:22.986142 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:22 crc kubenswrapper[4769]: I0131 16:29:22.986215 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:22 crc kubenswrapper[4769]: I0131 16:29:22.986240 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:23 crc kubenswrapper[4769]: I0131 16:29:23.645981 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-07 14:30:04.477916212 +0000 UTC Jan 31 16:29:23 crc kubenswrapper[4769]: I0131 16:29:23.702146 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 31 16:29:23 crc kubenswrapper[4769]: I0131 16:29:23.714621 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 31 16:29:23 crc kubenswrapper[4769]: I0131 16:29:23.818188 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:23 crc kubenswrapper[4769]: I0131 16:29:23.819755 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:23 crc kubenswrapper[4769]: I0131 16:29:23.819805 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:23 crc kubenswrapper[4769]: I0131 16:29:23.819826 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:23 crc kubenswrapper[4769]: I0131 16:29:23.822188 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 31 16:29:24 crc kubenswrapper[4769]: I0131 16:29:24.647144 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-27 18:27:21.563873194 +0000 UTC Jan 31 16:29:24 crc 
kubenswrapper[4769]: I0131 16:29:24.821452 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:24 crc kubenswrapper[4769]: I0131 16:29:24.822762 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:24 crc kubenswrapper[4769]: I0131 16:29:24.822817 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:24 crc kubenswrapper[4769]: I0131 16:29:24.822830 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:25 crc kubenswrapper[4769]: I0131 16:29:25.276029 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 31 16:29:25 crc kubenswrapper[4769]: I0131 16:29:25.647319 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-26 20:10:10.303745746 +0000 UTC Jan 31 16:29:25 crc kubenswrapper[4769]: I0131 16:29:25.823633 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:25 crc kubenswrapper[4769]: I0131 16:29:25.825259 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:25 crc kubenswrapper[4769]: I0131 16:29:25.825328 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:25 crc kubenswrapper[4769]: I0131 16:29:25.825354 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:26 crc kubenswrapper[4769]: W0131 16:29:26.261608 4769 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": net/http: TLS handshake timeout Jan 31 16:29:26 crc kubenswrapper[4769]: I0131 16:29:26.261737 4769 trace.go:236] Trace[31055294]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (31-Jan-2026 16:29:16.259) (total time: 10002ms): Jan 31 16:29:26 crc kubenswrapper[4769]: Trace[31055294]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (16:29:26.261) Jan 31 16:29:26 crc kubenswrapper[4769]: Trace[31055294]: [10.002056836s] [10.002056836s] END Jan 31 16:29:26 crc kubenswrapper[4769]: E0131 16:29:26.261773 4769 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Jan 31 16:29:26 crc kubenswrapper[4769]: I0131 16:29:26.635424 4769 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout Jan 31 16:29:26 crc kubenswrapper[4769]: I0131 16:29:26.649052 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline 
is 2025-12-20 15:08:28.291811741 +0000 UTC Jan 31 16:29:26 crc kubenswrapper[4769]: W0131 16:29:26.777932 4769 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": net/http: TLS handshake timeout Jan 31 16:29:26 crc kubenswrapper[4769]: I0131 16:29:26.778034 4769 trace.go:236] Trace[589169134]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (31-Jan-2026 16:29:16.776) (total time: 10001ms): Jan 31 16:29:26 crc kubenswrapper[4769]: Trace[589169134]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (16:29:26.777) Jan 31 16:29:26 crc kubenswrapper[4769]: Trace[589169134]: [10.001935252s] [10.001935252s] END Jan 31 16:29:26 crc kubenswrapper[4769]: E0131 16:29:26.778058 4769 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Jan 31 16:29:26 crc kubenswrapper[4769]: I0131 16:29:26.825763 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:26 crc kubenswrapper[4769]: I0131 16:29:26.826508 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:26 crc kubenswrapper[4769]: I0131 16:29:26.826546 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:26 crc kubenswrapper[4769]: I0131 16:29:26.826557 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:27 crc kubenswrapper[4769]: I0131 16:29:27.216384 4769 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Jan 31 16:29:27 crc kubenswrapper[4769]: I0131 16:29:27.216472 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Jan 31 16:29:27 crc kubenswrapper[4769]: I0131 16:29:27.227632 4769 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Jan 31 16:29:27 crc kubenswrapper[4769]: I0131 16:29:27.227700 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Jan 31 16:29:27 crc kubenswrapper[4769]: I0131 
16:29:27.649442 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-17 16:32:21.633689827 +0000 UTC Jan 31 16:29:28 crc kubenswrapper[4769]: I0131 16:29:28.276604 4769 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 31 16:29:28 crc kubenswrapper[4769]: I0131 16:29:28.276678 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 31 16:29:28 crc kubenswrapper[4769]: I0131 16:29:28.650102 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-03 12:43:06.290016888 +0000 UTC Jan 31 16:29:29 crc kubenswrapper[4769]: I0131 16:29:29.364283 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 31 16:29:29 crc kubenswrapper[4769]: I0131 16:29:29.364673 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:29 crc kubenswrapper[4769]: I0131 16:29:29.366207 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:29 crc kubenswrapper[4769]: I0131 16:29:29.366465 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:29 crc kubenswrapper[4769]: I0131 16:29:29.366698 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:29 crc kubenswrapper[4769]: I0131 16:29:29.372201 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 31 16:29:29 crc kubenswrapper[4769]: I0131 16:29:29.651017 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-12 11:03:34.955777593 +0000 UTC Jan 31 16:29:29 crc kubenswrapper[4769]: I0131 16:29:29.832993 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:29 crc kubenswrapper[4769]: I0131 16:29:29.834606 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:29 crc kubenswrapper[4769]: I0131 16:29:29.834641 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:29 crc kubenswrapper[4769]: I0131 16:29:29.834657 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:30 crc kubenswrapper[4769]: I0131 16:29:30.648757 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Jan 31 16:29:30 crc kubenswrapper[4769]: I0131 16:29:30.648960 4769 
kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:30 crc kubenswrapper[4769]: I0131 16:29:30.650672 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:30 crc kubenswrapper[4769]: I0131 16:29:30.650746 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:30 crc kubenswrapper[4769]: I0131 16:29:30.650765 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:30 crc kubenswrapper[4769]: I0131 16:29:30.651564 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-17 15:49:47.784398619 +0000 UTC Jan 31 16:29:30 crc kubenswrapper[4769]: I0131 16:29:30.668385 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Jan 31 16:29:30 crc kubenswrapper[4769]: I0131 16:29:30.835232 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:30 crc kubenswrapper[4769]: I0131 16:29:30.836037 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:30 crc kubenswrapper[4769]: I0131 16:29:30.836148 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:30 crc kubenswrapper[4769]: I0131 16:29:30.836168 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:30 crc kubenswrapper[4769]: I0131 16:29:30.896985 4769 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Jan 31 16:29:31 crc kubenswrapper[4769]: I0131 16:29:31.652051 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-09 19:32:32.651085644 +0000 UTC Jan 31 16:29:32 crc kubenswrapper[4769]: E0131 16:29:32.216660 4769 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.218838 4769 trace.go:236] Trace[316373661]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (31-Jan-2026 16:29:20.349) (total time: 11868ms): Jan 31 16:29:32 crc kubenswrapper[4769]: Trace[316373661]: ---"Objects listed" error: 11868ms (16:29:32.218) Jan 31 16:29:32 crc kubenswrapper[4769]: Trace[316373661]: [11.868921392s] [11.868921392s] END Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.218879 4769 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.220303 4769 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.220425 4769 trace.go:236] Trace[1869619611]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (31-Jan-2026 16:29:21.488) (total time: 10732ms): Jan 31 16:29:32 crc kubenswrapper[4769]: Trace[1869619611]: ---"Objects listed" error: 10732ms (16:29:32.220) Jan 31 16:29:32 crc kubenswrapper[4769]: Trace[1869619611]: 
[10.732185496s] [10.732185496s] END Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.220453 4769 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Jan 31 16:29:32 crc kubenswrapper[4769]: E0131 16:29:32.222990 4769 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.233401 4769 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146 Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.244460 4769 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:51136->192.168.126.11:17697: read: connection reset by peer" start-of-body= Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.244539 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:51136->192.168.126.11:17697: read: connection reset by peer" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.244893 4769 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.244978 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.245323 4769 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.245345 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.270472 4769 csr.go:261] certificate signing request csr-tgcq7 is approved, waiting to be issued Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.281610 4769 csr.go:257] certificate signing request csr-tgcq7 is issued Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.455143 4769 transport.go:147] "Certificate rotation detected, shutting down client connections to start using new credentials" Jan 31 16:29:32 crc kubenswrapper[4769]: W0131 16:29:32.455519 4769 reflector.go:484] k8s.io/client-go/informers/factory.go:160: 
watch of *v1.RuntimeClass ended with: very short watch: k8s.io/client-go/informers/factory.go:160: Unexpected watch close - watch lasted less than a second and no items received Jan 31 16:29:32 crc kubenswrapper[4769]: W0131 16:29:32.455557 4769 reflector.go:484] k8s.io/client-go/informers/factory.go:160: watch of *v1.Node ended with: very short watch: k8s.io/client-go/informers/factory.go:160: Unexpected watch close - watch lasted less than a second and no items received Jan 31 16:29:32 crc kubenswrapper[4769]: E0131 16:29:32.455552 4769 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/events\": read tcp 38.102.83.22:54450->38.102.83.22:6443: use of closed network connection" event="&Event{ObjectMeta:{kube-rbac-proxy-crio-crc.188fddb633cc7fd7 openshift-machine-config-operator 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-machine-config-operator,Name:kube-rbac-proxy-crio-crc,UID:d1b160f5dda77d281dd8e69ec8d817f9,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{setup},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-31 16:29:13.202720727 +0000 UTC m=+1.276889426,LastTimestamp:2026-01-31 16:29:13.202720727 +0000 UTC m=+1.276889426,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.628277 4769 apiserver.go:52] "Watching apiserver" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.634224 4769 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.634541 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf"] Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.635847 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.635918 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.635979 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:29:32 crc kubenswrapper[4769]: E0131 16:29:32.636163 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.636306 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 31 16:29:32 crc kubenswrapper[4769]: E0131 16:29:32.636533 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.637036 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.637071 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 31 16:29:32 crc kubenswrapper[4769]: E0131 16:29:32.637115 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.637538 4769 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.639131 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.639331 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.639734 4769 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.640394 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.640406 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.640741 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.640749 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.640895 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.640960 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 
16:29:32.641069 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.652304 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-11 11:22:40.73876153 +0000 UTC Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.679922 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.698588 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.712911 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.724667 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.724780 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.724849 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.724889 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.724926 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.724964 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.724998 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 
16:29:32.725033 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.725066 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.725101 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.725140 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.725177 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.725211 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.725239 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.725257 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.725300 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.725334 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.725371 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.725405 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.725443 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.725482 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.725556 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.725591 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.725808 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.725813 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.725866 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.725880 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.725986 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.726033 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.726069 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.726073 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.726100 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.726069 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.726131 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.726162 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.726196 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.726227 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.726277 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.726309 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.726325 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.726341 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.726372 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.726405 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.726523 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.726600 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.726727 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.726861 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.726944 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.727125 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.726562 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.727188 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.727212 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.727278 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.727306 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.727333 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.727342 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.727356 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.727379 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.727402 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.727451 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.727472 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.727553 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.727582 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.727603 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.727624 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.727648 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: 
\"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.727671 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.727692 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.727712 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.727700 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.727734 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.727763 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.727810 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.727836 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.727885 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.727925 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.727957 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.727990 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.727990 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.728022 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.728126 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.728132 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.728148 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.728195 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.728331 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.728372 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.728382 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.728402 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.728384 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.728447 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.728478 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.728565 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.728768 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.728964 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.729129 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.729159 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.729258 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.729667 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.729719 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.729728 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.729762 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.729760 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.729905 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.729949 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.730541 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.730618 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.730652 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.730700 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.730732 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.730765 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.730798 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.730829 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: 
\"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.730878 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.730912 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.730976 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.731006 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.731039 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.731068 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.731107 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.731151 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.731189 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.731271 4769 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.731307 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.731338 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.731372 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.731863 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.731927 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.731960 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.732005 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.732044 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.732079 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " 
Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.732118 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.732162 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.732197 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.732236 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.732295 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.732334 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.732370 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.732421 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.732539 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.732590 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 31 
16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.732633 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.732668 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.732700 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.732731 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.732762 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.732793 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.733378 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.733431 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.733459 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.733577 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: 
\"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.733618 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.733669 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.733703 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.733736 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.733768 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.733801 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.733835 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.733867 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.733899 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.733937 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: 
\"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.733969 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.734002 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.734034 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.734120 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.734147 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.734209 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.734229 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.734281 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.734306 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.734355 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started 
for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.734380 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.734439 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.734462 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.734486 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.734554 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.734579 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.734646 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.734668 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.734715 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.734766 4769 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.734841 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.734866 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.734891 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.734918 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.734950 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.734982 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.735018 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.729896 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.729928 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). 
InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.737802 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.730176 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.730270 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.730294 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.730390 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.730730 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.731055 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.731166 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.731639 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.732025 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.732306 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.732546 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.732583 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.732608 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.732624 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.733011 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.733178 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.733264 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.733616 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.734240 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.734285 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.734612 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.734776 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.734998 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: E0131 16:29:32.735083 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:29:33.235012723 +0000 UTC m=+21.309181412 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.735883 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.736290 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.736318 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.736335 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.736405 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.736602 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.736891 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.736976 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.737017 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.737207 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.738086 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.737577 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.737661 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.738232 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.738281 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.738354 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.738399 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.738412 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.738453 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.738486 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.738558 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.738593 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.738611 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.738626 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.738702 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.738769 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.738801 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.738861 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.738891 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.738923 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.738949 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.738977 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.739001 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod 
\"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.739021 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.739028 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.739052 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.739122 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.739149 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.739171 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.739195 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.739216 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.739241 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.739261 
4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.739282 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.739301 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.739318 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.739336 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.739356 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.739379 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.739397 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.739415 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.739432 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.739449 4769 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.739470 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.739853 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.740044 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.740076 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.740617 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.740974 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.741118 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.741126 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.741381 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.741714 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.741754 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.741776 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.741721 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.737848 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.738072 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.741307 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.742055 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.742096 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.742413 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.742461 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.742652 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.742679 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.743022 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.743103 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.737768 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.743151 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.743344 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.742661 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.743812 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.744491 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.744551 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.744732 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.744736 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.744831 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.744873 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.744902 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.745165 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.745396 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.745464 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.743629 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.746187 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.746481 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.746602 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.746667 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.746704 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.746752 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.746795 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.746860 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.746964 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.746994 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.747041 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.747082 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.747127 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.747178 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.747219 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.747251 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.747260 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.747343 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.747452 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.747387 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.747537 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.747619 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.747549 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.748009 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: E0131 16:29:32.748042 4769 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 31 16:29:32 crc kubenswrapper[4769]: E0131 16:29:32.748133 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-31 16:29:33.248105017 +0000 UTC m=+21.322273696 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.748127 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.748157 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.748572 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.748612 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.749137 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.748712 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.749283 4769 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.749280 4769 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.749319 4769 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.749342 4769 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.749344 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.749361 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.749421 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.749448 4769 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.749488 4769 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.749546 4769 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.749570 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.749594 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.749617 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.749638 4769 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.749659 4769 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.749680 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.749701 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.749722 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: 
\"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.749742 4769 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.749762 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.749781 4769 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.749800 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.749819 4769 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.749840 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.749859 4769 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.749879 4769 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.749897 4769 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.749916 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.749936 4769 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.750004 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.750015 4769 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.750058 4769 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.750079 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.750099 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: E0131 16:29:32.749039 4769 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.748767 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.750118 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.750172 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.750191 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.750209 4769 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.750234 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.750252 4769 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.750271 4769 reconciler_common.go:293] "Volume detached for volume 
\"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.748865 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: E0131 16:29:32.750318 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-31 16:29:33.250278037 +0000 UTC m=+21.324446736 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.750380 4769 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.750410 4769 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.750433 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.750432 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.750454 4769 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.750693 4769 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.750761 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.750780 4769 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.750797 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.751067 4769 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.751084 4769 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.751100 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.751116 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.751131 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.751146 4769 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.751162 4769 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.751178 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.752242 4769 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.752265 4769 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.752281 4769 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.757373 4769 operation_generator.go:803] 
UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.757624 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.757733 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.757964 4769 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.758099 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.758176 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.758214 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.758706 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.758765 4769 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.758790 4769 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.758813 4769 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.758835 4769 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.758856 4769 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.758879 4769 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.758910 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.758933 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.758964 4769 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.758985 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.759009 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.759006 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: 
\"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.759030 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.759051 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.759073 4769 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.759094 4769 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.759115 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.759126 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.759136 4769 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.759201 4769 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.759226 4769 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.759248 4769 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.759400 4769 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.759421 4769 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.759441 4769 
reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.759461 4769 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.759482 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.759531 4769 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.759551 4769 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.759570 4769 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.759589 4769 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.759636 4769 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.759660 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.759680 4769 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.759711 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.759737 4769 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.759762 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.759846 4769 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: 
\"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.759950 4769 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.759980 4769 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.760005 4769 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.760031 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.760061 4769 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.760084 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.760107 4769 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.760134 4769 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.760160 4769 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.760186 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.760212 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.760238 4769 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.760263 4769 reconciler_common.go:293] "Volume detached for volume 
\"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.760290 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.760315 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.760341 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.760367 4769 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.760389 4769 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.760408 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.760427 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.760447 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.760535 4769 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.760906 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.761412 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.761511 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.761634 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.761659 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.762280 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.762575 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.764706 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: E0131 16:29:32.766548 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 31 16:29:32 crc kubenswrapper[4769]: E0131 16:29:32.766581 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 31 16:29:32 crc kubenswrapper[4769]: E0131 16:29:32.766598 4769 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 31 16:29:32 crc kubenswrapper[4769]: E0131 16:29:32.766687 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-31 16:29:33.266664463 +0000 UTC m=+21.340833142 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.769668 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.770014 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.770687 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.771239 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.772872 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.773369 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.773556 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.774184 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.774447 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.777156 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.779996 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.780718 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.782583 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.782729 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.782781 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.783138 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.783158 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.783277 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.783310 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.784228 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.784333 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.784754 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.784945 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.784975 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.784996 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). 
InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.785382 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.785596 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.787479 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.787287 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.788075 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.787885 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.788234 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.788438 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.788607 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.790099 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 31 16:29:32 crc kubenswrapper[4769]: E0131 16:29:32.790704 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 31 16:29:32 crc kubenswrapper[4769]: E0131 16:29:32.790740 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 31 16:29:32 crc kubenswrapper[4769]: E0131 16:29:32.790759 4769 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 31 16:29:32 crc kubenswrapper[4769]: E0131 16:29:32.790830 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-31 16:29:33.290806905 +0000 UTC m=+21.364975584 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.790854 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.790939 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.790952 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.791072 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.791166 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.791426 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.791609 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.791770 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.793008 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.793848 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.796450 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.796540 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.796625 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.796652 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.797476 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.798747 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.798753 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.800374 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.800631 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.804450 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.804606 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.814228 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.814473 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.827383 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.830591 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.837085 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.838665 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.842334 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.845557 4769 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0" exitCode=255 Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.845618 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0"} Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.849886 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.857508 4769 scope.go:117] "RemoveContainer" containerID="d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.860045 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.861055 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862278 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862321 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862380 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod 
\"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862389 4769 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862440 4769 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862450 4769 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862465 4769 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862479 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862449 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862529 4769 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862540 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862552 4769 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862562 4769 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862574 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862584 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862593 4769 
reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862601 4769 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862610 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862621 4769 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862630 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862638 4769 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862651 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862660 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862670 4769 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862680 4769 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862691 4769 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862723 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862732 4769 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862740 4769 reconciler_common.go:293] "Volume detached for volume 
\"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862749 4769 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862758 4769 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862769 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862777 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862786 4769 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862793 4769 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862801 4769 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862810 4769 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862819 4769 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862829 4769 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862837 4769 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862845 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 
16:29:32.862854 4769 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862863 4769 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862871 4769 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862879 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862888 4769 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862897 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862905 4769 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862914 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.862924 4769 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.863047 4769 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.863059 4769 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.863070 4769 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.863080 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc 
kubenswrapper[4769]: I0131 16:29:32.863089 4769 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.863098 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.863108 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.863119 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.863128 4769 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.863138 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.863146 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.863155 4769 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.863164 4769 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.863177 4769 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.863185 4769 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.863194 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.863202 4769 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 
16:29:32.863212 4769 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.863233 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.863243 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.863252 4769 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.863265 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.863275 4769 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.863283 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.863292 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.863300 4769 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.863308 4769 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.863316 4769 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.863324 4769 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.863332 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.863341 4769 
reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.863349 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.876557 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.886824 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.896379 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.916831 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.941805 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.959834 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.963440 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.974976 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 31 16:29:32 crc kubenswrapper[4769]: W0131 16:29:32.976618 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-f0869be842af259e43284fe4037333cc75b7a96906fc2451f638edf219fa3892 WatchSource:0}: Error finding container f0869be842af259e43284fe4037333cc75b7a96906fc2451f638edf219fa3892: Status 404 returned error can't find the container with id f0869be842af259e43284fe4037333cc75b7a96906fc2451f638edf219fa3892 Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.982740 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 31 16:29:32 crc kubenswrapper[4769]: W0131 16:29:32.985788 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-363d66eef214ff51d931b3803d8b17fc8345b7419beb7cbbf3863aaf72f6642e WatchSource:0}: Error finding container 363d66eef214ff51d931b3803d8b17fc8345b7419beb7cbbf3863aaf72f6642e: Status 404 returned error can't find the container with id 363d66eef214ff51d931b3803d8b17fc8345b7419beb7cbbf3863aaf72f6642e Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.989335 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 31 16:29:32 crc kubenswrapper[4769]: I0131 16:29:32.997298 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21fa2db2-f448-487d-9ddb-ba4da28e8ffa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-31
T16:29:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0131 16:29:26.255718 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0131 16:29:26.257930 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3522701725/tls.crt::/tmp/serving-cert-3522701725/tls.key\\\\\\\"\\\\nI0131 16:29:32.222719 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0131 16:29:32.227365 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0131 16:29:32.227391 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0131 16:29:32.227414 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0131 16:29:32.227419 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0131 16:29:32.234321 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0131 16:29:32.234340 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234349 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0131 16:29:32.234352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0131 16:29:32.234355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0131 16:29:32.234357 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0131 16:29:32.234523 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0131 16:29:32.237094 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.266063 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.266162 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.266191 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod 
\"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:29:33 crc kubenswrapper[4769]: E0131 16:29:33.266250 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:29:34.266233807 +0000 UTC m=+22.340402476 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:29:33 crc kubenswrapper[4769]: E0131 16:29:33.266279 4769 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 31 16:29:33 crc kubenswrapper[4769]: E0131 16:29:33.266386 4769 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 31 16:29:33 crc kubenswrapper[4769]: E0131 16:29:33.266470 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-31 16:29:34.266429522 +0000 UTC m=+22.340598191 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 31 16:29:33 crc kubenswrapper[4769]: E0131 16:29:33.266491 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-31 16:29:34.266484563 +0000 UTC m=+22.340653222 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.283075 4769 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2027-01-31 16:24:32 +0000 UTC, rotation deadline is 2026-10-25 03:36:41.238216716 +0000 UTC Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.283132 4769 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 6395h7m7.955086589s for next certificate rotation Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.290550 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-slrbh"] Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.290842 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-slrbh" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.293762 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.296244 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.296946 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.308185 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.321837 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.342064 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.355989 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21fa2db2-f448-487d-9ddb-ba4da28e8ffa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-31
T16:29:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0131 16:29:26.255718 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0131 16:29:26.257930 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3522701725/tls.crt::/tmp/serving-cert-3522701725/tls.key\\\\\\\"\\\\nI0131 16:29:32.222719 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0131 16:29:32.227365 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0131 16:29:32.227391 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0131 16:29:32.227414 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0131 16:29:32.227419 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0131 16:29:32.234321 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0131 16:29:32.234340 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234349 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0131 16:29:32.234352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0131 16:29:32.234355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0131 16:29:32.234357 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0131 16:29:32.234523 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0131 16:29:32.237094 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.366849 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.367089 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hqcpx\" (UniqueName: \"kubernetes.io/projected/48d46c05-78b8-4355-9027-77efbbfbe87c-kube-api-access-hqcpx\") pod \"node-resolver-slrbh\" (UID: \"48d46c05-78b8-4355-9027-77efbbfbe87c\") " pod="openshift-dns/node-resolver-slrbh" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.367111 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/48d46c05-78b8-4355-9027-77efbbfbe87c-hosts-file\") pod \"node-resolver-slrbh\" (UID: 
\"48d46c05-78b8-4355-9027-77efbbfbe87c\") " pod="openshift-dns/node-resolver-slrbh" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.367129 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:29:33 crc kubenswrapper[4769]: E0131 16:29:33.367231 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 31 16:29:33 crc kubenswrapper[4769]: E0131 16:29:33.367264 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 31 16:29:33 crc kubenswrapper[4769]: E0131 16:29:33.367277 4769 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 31 16:29:33 crc kubenswrapper[4769]: E0131 16:29:33.367331 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-31 16:29:34.36731313 +0000 UTC m=+22.441481799 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 31 16:29:33 crc kubenswrapper[4769]: E0131 16:29:33.367240 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 31 16:29:33 crc kubenswrapper[4769]: E0131 16:29:33.367417 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 31 16:29:33 crc kubenswrapper[4769]: E0131 16:29:33.367441 4769 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 31 16:29:33 crc kubenswrapper[4769]: E0131 16:29:33.367544 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-31 16:29:34.367521275 +0000 UTC m=+22.441689954 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.369941 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.386789 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.407675 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-slrbh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"48d46c05-78b8-4355-9027-77efbbfbe87c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hqcpx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-slrbh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 31 16:29:33 crc 
kubenswrapper[4769]: I0131 16:29:33.431881 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.467680 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hqcpx\" (UniqueName: \"kubernetes.io/projected/48d46c05-78b8-4355-9027-77efbbfbe87c-kube-api-access-hqcpx\") pod \"node-resolver-slrbh\" (UID: \"48d46c05-78b8-4355-9027-77efbbfbe87c\") " pod="openshift-dns/node-resolver-slrbh" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.467740 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/48d46c05-78b8-4355-9027-77efbbfbe87c-hosts-file\") pod \"node-resolver-slrbh\" (UID: \"48d46c05-78b8-4355-9027-77efbbfbe87c\") " pod="openshift-dns/node-resolver-slrbh" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.467822 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/48d46c05-78b8-4355-9027-77efbbfbe87c-hosts-file\") pod \"node-resolver-slrbh\" (UID: \"48d46c05-78b8-4355-9027-77efbbfbe87c\") " pod="openshift-dns/node-resolver-slrbh" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.500427 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hqcpx\" 
(UniqueName: \"kubernetes.io/projected/48d46c05-78b8-4355-9027-77efbbfbe87c-kube-api-access-hqcpx\") pod \"node-resolver-slrbh\" (UID: \"48d46c05-78b8-4355-9027-77efbbfbe87c\") " pod="openshift-dns/node-resolver-slrbh" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.610868 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-slrbh" Jan 31 16:29:33 crc kubenswrapper[4769]: W0131 16:29:33.620299 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod48d46c05_78b8_4355_9027_77efbbfbe87c.slice/crio-ab5e6989f0fa08ea45a3d922187533c4f1389ef8541e25e0687d8ce4d1c024d3 WatchSource:0}: Error finding container ab5e6989f0fa08ea45a3d922187533c4f1389ef8541e25e0687d8ce4d1c024d3: Status 404 returned error can't find the container with id ab5e6989f0fa08ea45a3d922187533c4f1389ef8541e25e0687d8ce4d1c024d3 Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.653093 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-29 03:09:14.058401445 +0000 UTC Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.654008 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-rftqz"] Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.654967 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-g5kbw"] Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.655122 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-4bqbm"] Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.655177 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-rftqz" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.655344 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.655394 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.657555 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.657799 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.658150 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.658438 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.658622 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.658664 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.659122 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.659342 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.659603 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.659852 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.660594 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.663670 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.668810 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.681440 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.689817 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-slrbh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"48d46c05-78b8-4355-9027-77efbbfbe87c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hqcpx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-slrbh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.701141 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g5kbw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a7cfe09-9892-494d-a420-5d720afb3df3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m86wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g5kbw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.713528 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.730439 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f9e971a-93ce-4a49-a970-a2789486d12c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rftqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:33Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.741705 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d352f75-43f7-4b8c-867e-cfb17bbbe011\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-4bqbm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:33Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.755524 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"21fa2db2-f448-487d-9ddb-ba4da28e8ffa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0131 16:29:26.255718 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0131 16:29:26.257930 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3522701725/tls.crt::/tmp/serving-cert-3522701725/tls.key\\\\\\\"\\\\nI0131 16:29:32.222719 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0131 16:29:32.227365 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0131 16:29:32.227391 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0131 16:29:32.227414 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0131 16:29:32.227419 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0131 16:29:32.234321 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0131 16:29:32.234340 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234349 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0131 16:29:32.234352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0131 16:29:32.234355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0131 16:29:32.234357 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0131 16:29:32.234523 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0131 16:29:32.237094 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:33Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.767877 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:33Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.770258 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/8f9e971a-93ce-4a49-a970-a2789486d12c-tuning-conf-dir\") pod \"multus-additional-cni-plugins-rftqz\" (UID: \"8f9e971a-93ce-4a49-a970-a2789486d12c\") " pod="openshift-multus/multus-additional-cni-plugins-rftqz" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.770306 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4w7km\" (UniqueName: \"kubernetes.io/projected/1d352f75-43f7-4b8c-867e-cfb17bbbe011-kube-api-access-4w7km\") pod \"machine-config-daemon-4bqbm\" (UID: \"1d352f75-43f7-4b8c-867e-cfb17bbbe011\") " pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.770328 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/4a7cfe09-9892-494d-a420-5d720afb3df3-multus-socket-dir-parent\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.770352 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/8f9e971a-93ce-4a49-a970-a2789486d12c-cni-binary-copy\") pod \"multus-additional-cni-plugins-rftqz\" (UID: \"8f9e971a-93ce-4a49-a970-a2789486d12c\") " pod="openshift-multus/multus-additional-cni-plugins-rftqz" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.770374 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: 
\"kubernetes.io/host-path/4a7cfe09-9892-494d-a420-5d720afb3df3-system-cni-dir\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.770459 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4a7cfe09-9892-494d-a420-5d720afb3df3-etc-kubernetes\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.770480 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/4a7cfe09-9892-494d-a420-5d720afb3df3-hostroot\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.770517 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/4a7cfe09-9892-494d-a420-5d720afb3df3-host-run-multus-certs\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.770537 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/4a7cfe09-9892-494d-a420-5d720afb3df3-multus-cni-dir\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.770556 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/4a7cfe09-9892-494d-a420-5d720afb3df3-host-var-lib-cni-multus\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.770594 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/4a7cfe09-9892-494d-a420-5d720afb3df3-os-release\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.770700 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/8f9e971a-93ce-4a49-a970-a2789486d12c-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-rftqz\" (UID: \"8f9e971a-93ce-4a49-a970-a2789486d12c\") " pod="openshift-multus/multus-additional-cni-plugins-rftqz" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.770758 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/4a7cfe09-9892-494d-a420-5d720afb3df3-host-var-lib-kubelet\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.770783 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: 
\"kubernetes.io/configmap/1d352f75-43f7-4b8c-867e-cfb17bbbe011-mcd-auth-proxy-config\") pod \"machine-config-daemon-4bqbm\" (UID: \"1d352f75-43f7-4b8c-867e-cfb17bbbe011\") " pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.770817 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mrkdd\" (UniqueName: \"kubernetes.io/projected/8f9e971a-93ce-4a49-a970-a2789486d12c-kube-api-access-mrkdd\") pod \"multus-additional-cni-plugins-rftqz\" (UID: \"8f9e971a-93ce-4a49-a970-a2789486d12c\") " pod="openshift-multus/multus-additional-cni-plugins-rftqz" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.770840 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/4a7cfe09-9892-494d-a420-5d720afb3df3-host-run-k8s-cni-cncf-io\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.770860 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/4a7cfe09-9892-494d-a420-5d720afb3df3-host-run-netns\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.770878 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4a7cfe09-9892-494d-a420-5d720afb3df3-multus-daemon-config\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.770913 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/4a7cfe09-9892-494d-a420-5d720afb3df3-host-var-lib-cni-bin\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.770931 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/4a7cfe09-9892-494d-a420-5d720afb3df3-multus-conf-dir\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.771021 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/4a7cfe09-9892-494d-a420-5d720afb3df3-cnibin\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.771046 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m86wt\" (UniqueName: \"kubernetes.io/projected/4a7cfe09-9892-494d-a420-5d720afb3df3-kube-api-access-m86wt\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.771064 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/1d352f75-43f7-4b8c-867e-cfb17bbbe011-proxy-tls\") pod \"machine-config-daemon-4bqbm\" (UID: \"1d352f75-43f7-4b8c-867e-cfb17bbbe011\") " pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.771083 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/1d352f75-43f7-4b8c-867e-cfb17bbbe011-rootfs\") pod \"machine-config-daemon-4bqbm\" (UID: \"1d352f75-43f7-4b8c-867e-cfb17bbbe011\") " pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.771149 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/8f9e971a-93ce-4a49-a970-a2789486d12c-cnibin\") pod \"multus-additional-cni-plugins-rftqz\" (UID: \"8f9e971a-93ce-4a49-a970-a2789486d12c\") " pod="openshift-multus/multus-additional-cni-plugins-rftqz" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.771169 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4a7cfe09-9892-494d-a420-5d720afb3df3-cni-binary-copy\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.771249 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/8f9e971a-93ce-4a49-a970-a2789486d12c-system-cni-dir\") pod \"multus-additional-cni-plugins-rftqz\" (UID: \"8f9e971a-93ce-4a49-a970-a2789486d12c\") " pod="openshift-multus/multus-additional-cni-plugins-rftqz" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.771301 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/8f9e971a-93ce-4a49-a970-a2789486d12c-os-release\") pod \"multus-additional-cni-plugins-rftqz\" (UID: \"8f9e971a-93ce-4a49-a970-a2789486d12c\") " pod="openshift-multus/multus-additional-cni-plugins-rftqz" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.780471 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:33Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.794128 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:33Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.806440 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:33Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.824156 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f9e971a-93ce-4a49-a970-a2789486d12c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rftqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:33Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.841244 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d352f75-43f7-4b8c-867e-cfb17bbbe011\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-4bqbm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:33Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.850257 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 
16:29:33.852222 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273"} Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.852530 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.853085 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"560b89be4f73041e7fbb3968b734e4fcf28f4f191c0e8929beef5473e04bf306"} Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.854521 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"4e5e34357e35a0bda20351c442465d7866c927dfad1a6c8acf1dc32b52a326ee"} Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.854557 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"bb37e108557965f63da49a443c6667212f8ab8eb0a7099a0a106c45e00e78e76"} Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.854568 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"363d66eef214ff51d931b3803d8b17fc8345b7419beb7cbbf3863aaf72f6642e"} Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.855587 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-slrbh" event={"ID":"48d46c05-78b8-4355-9027-77efbbfbe87c","Type":"ContainerStarted","Data":"c68712921d6f140f1028a42cadcda30d38e3a9772ad3ca53e6d7b3b039433b40"} Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.855614 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-slrbh" event={"ID":"48d46c05-78b8-4355-9027-77efbbfbe87c","Type":"ContainerStarted","Data":"ab5e6989f0fa08ea45a3d922187533c4f1389ef8541e25e0687d8ce4d1c024d3"} Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.856561 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"79dca88fae791047a9091e1fcad57d33d0061bbb52395ed325e2658e5fd6ffc9"} Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.856608 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"f0869be842af259e43284fe4037333cc75b7a96906fc2451f638edf219fa3892"} Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.858406 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"21fa2db2-f448-487d-9ddb-ba4da28e8ffa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0131 16:29:26.255718 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0131 16:29:26.257930 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3522701725/tls.crt::/tmp/serving-cert-3522701725/tls.key\\\\\\\"\\\\nI0131 16:29:32.222719 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0131 16:29:32.227365 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0131 16:29:32.227391 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0131 16:29:32.227414 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0131 16:29:32.227419 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0131 16:29:32.234321 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0131 16:29:32.234340 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234349 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0131 16:29:32.234352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0131 16:29:32.234355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0131 16:29:32.234357 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0131 16:29:32.234523 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0131 16:29:32.237094 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:33Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.871450 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:33Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.872789 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/4a7cfe09-9892-494d-a420-5d720afb3df3-host-var-lib-cni-bin\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.872830 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/4a7cfe09-9892-494d-a420-5d720afb3df3-multus-conf-dir\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.872851 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/4a7cfe09-9892-494d-a420-5d720afb3df3-cnibin\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.872877 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m86wt\" (UniqueName: \"kubernetes.io/projected/4a7cfe09-9892-494d-a420-5d720afb3df3-kube-api-access-m86wt\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.872905 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/1d352f75-43f7-4b8c-867e-cfb17bbbe011-proxy-tls\") pod \"machine-config-daemon-4bqbm\" (UID: \"1d352f75-43f7-4b8c-867e-cfb17bbbe011\") " pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.872928 4769 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/1d352f75-43f7-4b8c-867e-cfb17bbbe011-rootfs\") pod \"machine-config-daemon-4bqbm\" (UID: \"1d352f75-43f7-4b8c-867e-cfb17bbbe011\") " pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.872928 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/4a7cfe09-9892-494d-a420-5d720afb3df3-host-var-lib-cni-bin\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.872944 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/8f9e971a-93ce-4a49-a970-a2789486d12c-cnibin\") pod \"multus-additional-cni-plugins-rftqz\" (UID: \"8f9e971a-93ce-4a49-a970-a2789486d12c\") " pod="openshift-multus/multus-additional-cni-plugins-rftqz" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.872959 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4a7cfe09-9892-494d-a420-5d720afb3df3-cni-binary-copy\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.872988 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/8f9e971a-93ce-4a49-a970-a2789486d12c-system-cni-dir\") pod \"multus-additional-cni-plugins-rftqz\" (UID: \"8f9e971a-93ce-4a49-a970-a2789486d12c\") " pod="openshift-multus/multus-additional-cni-plugins-rftqz" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.873004 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/8f9e971a-93ce-4a49-a970-a2789486d12c-os-release\") pod \"multus-additional-cni-plugins-rftqz\" (UID: \"8f9e971a-93ce-4a49-a970-a2789486d12c\") " pod="openshift-multus/multus-additional-cni-plugins-rftqz" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.873019 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/8f9e971a-93ce-4a49-a970-a2789486d12c-tuning-conf-dir\") pod \"multus-additional-cni-plugins-rftqz\" (UID: \"8f9e971a-93ce-4a49-a970-a2789486d12c\") " pod="openshift-multus/multus-additional-cni-plugins-rftqz" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.873036 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4w7km\" (UniqueName: \"kubernetes.io/projected/1d352f75-43f7-4b8c-867e-cfb17bbbe011-kube-api-access-4w7km\") pod \"machine-config-daemon-4bqbm\" (UID: \"1d352f75-43f7-4b8c-867e-cfb17bbbe011\") " pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.873051 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/4a7cfe09-9892-494d-a420-5d720afb3df3-multus-socket-dir-parent\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.873071 
4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/8f9e971a-93ce-4a49-a970-a2789486d12c-cni-binary-copy\") pod \"multus-additional-cni-plugins-rftqz\" (UID: \"8f9e971a-93ce-4a49-a970-a2789486d12c\") " pod="openshift-multus/multus-additional-cni-plugins-rftqz" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.873088 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/4a7cfe09-9892-494d-a420-5d720afb3df3-system-cni-dir\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.873104 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/4a7cfe09-9892-494d-a420-5d720afb3df3-hostroot\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.873119 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/4a7cfe09-9892-494d-a420-5d720afb3df3-host-run-multus-certs\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.873134 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4a7cfe09-9892-494d-a420-5d720afb3df3-etc-kubernetes\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.873155 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/4a7cfe09-9892-494d-a420-5d720afb3df3-multus-cni-dir\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.873170 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/4a7cfe09-9892-494d-a420-5d720afb3df3-host-var-lib-cni-multus\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.873186 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/4a7cfe09-9892-494d-a420-5d720afb3df3-os-release\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.873193 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/4a7cfe09-9892-494d-a420-5d720afb3df3-multus-conf-dir\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.873201 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/8f9e971a-93ce-4a49-a970-a2789486d12c-cni-sysctl-allowlist\") pod 
\"multus-additional-cni-plugins-rftqz\" (UID: \"8f9e971a-93ce-4a49-a970-a2789486d12c\") " pod="openshift-multus/multus-additional-cni-plugins-rftqz" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.873247 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/4a7cfe09-9892-494d-a420-5d720afb3df3-host-var-lib-kubelet\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.873273 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/4a7cfe09-9892-494d-a420-5d720afb3df3-host-run-k8s-cni-cncf-io\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.873291 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/4a7cfe09-9892-494d-a420-5d720afb3df3-host-run-netns\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.873309 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4a7cfe09-9892-494d-a420-5d720afb3df3-multus-daemon-config\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.873331 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/1d352f75-43f7-4b8c-867e-cfb17bbbe011-mcd-auth-proxy-config\") pod \"machine-config-daemon-4bqbm\" (UID: \"1d352f75-43f7-4b8c-867e-cfb17bbbe011\") " pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.873353 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mrkdd\" (UniqueName: \"kubernetes.io/projected/8f9e971a-93ce-4a49-a970-a2789486d12c-kube-api-access-mrkdd\") pod \"multus-additional-cni-plugins-rftqz\" (UID: \"8f9e971a-93ce-4a49-a970-a2789486d12c\") " pod="openshift-multus/multus-additional-cni-plugins-rftqz" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.873617 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/4a7cfe09-9892-494d-a420-5d720afb3df3-cnibin\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.873641 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/4a7cfe09-9892-494d-a420-5d720afb3df3-host-var-lib-kubelet\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.873664 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/4a7cfe09-9892-494d-a420-5d720afb3df3-host-run-k8s-cni-cncf-io\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " 
pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.873684 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/4a7cfe09-9892-494d-a420-5d720afb3df3-host-run-netns\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.873959 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/4a7cfe09-9892-494d-a420-5d720afb3df3-multus-socket-dir-parent\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.874044 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/8f9e971a-93ce-4a49-a970-a2789486d12c-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-rftqz\" (UID: \"8f9e971a-93ce-4a49-a970-a2789486d12c\") " pod="openshift-multus/multus-additional-cni-plugins-rftqz" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.874099 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/8f9e971a-93ce-4a49-a970-a2789486d12c-system-cni-dir\") pod \"multus-additional-cni-plugins-rftqz\" (UID: \"8f9e971a-93ce-4a49-a970-a2789486d12c\") " pod="openshift-multus/multus-additional-cni-plugins-rftqz" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.874371 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/1d352f75-43f7-4b8c-867e-cfb17bbbe011-mcd-auth-proxy-config\") pod \"machine-config-daemon-4bqbm\" (UID: \"1d352f75-43f7-4b8c-867e-cfb17bbbe011\") " pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.874376 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4a7cfe09-9892-494d-a420-5d720afb3df3-multus-daemon-config\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.874413 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4a7cfe09-9892-494d-a420-5d720afb3df3-etc-kubernetes\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.874448 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/4a7cfe09-9892-494d-a420-5d720afb3df3-system-cni-dir\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.874470 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/4a7cfe09-9892-494d-a420-5d720afb3df3-hostroot\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.874513 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/4a7cfe09-9892-494d-a420-5d720afb3df3-host-run-multus-certs\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.874533 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/8f9e971a-93ce-4a49-a970-a2789486d12c-cni-binary-copy\") pod \"multus-additional-cni-plugins-rftqz\" (UID: \"8f9e971a-93ce-4a49-a970-a2789486d12c\") " pod="openshift-multus/multus-additional-cni-plugins-rftqz" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.874542 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/1d352f75-43f7-4b8c-867e-cfb17bbbe011-rootfs\") pod \"machine-config-daemon-4bqbm\" (UID: \"1d352f75-43f7-4b8c-867e-cfb17bbbe011\") " pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.874551 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4a7cfe09-9892-494d-a420-5d720afb3df3-cni-binary-copy\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.874570 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/8f9e971a-93ce-4a49-a970-a2789486d12c-os-release\") pod \"multus-additional-cni-plugins-rftqz\" (UID: \"8f9e971a-93ce-4a49-a970-a2789486d12c\") " pod="openshift-multus/multus-additional-cni-plugins-rftqz" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.874582 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/4a7cfe09-9892-494d-a420-5d720afb3df3-host-var-lib-cni-multus\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.874630 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/8f9e971a-93ce-4a49-a970-a2789486d12c-cnibin\") pod \"multus-additional-cni-plugins-rftqz\" (UID: \"8f9e971a-93ce-4a49-a970-a2789486d12c\") " pod="openshift-multus/multus-additional-cni-plugins-rftqz" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.874635 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/4a7cfe09-9892-494d-a420-5d720afb3df3-os-release\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.874739 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/4a7cfe09-9892-494d-a420-5d720afb3df3-multus-cni-dir\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.874992 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/8f9e971a-93ce-4a49-a970-a2789486d12c-tuning-conf-dir\") pod \"multus-additional-cni-plugins-rftqz\" (UID: 
\"8f9e971a-93ce-4a49-a970-a2789486d12c\") " pod="openshift-multus/multus-additional-cni-plugins-rftqz" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.884649 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/1d352f75-43f7-4b8c-867e-cfb17bbbe011-proxy-tls\") pod \"machine-config-daemon-4bqbm\" (UID: \"1d352f75-43f7-4b8c-867e-cfb17bbbe011\") " pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.887782 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:33Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.888812 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m86wt\" (UniqueName: \"kubernetes.io/projected/4a7cfe09-9892-494d-a420-5d720afb3df3-kube-api-access-m86wt\") pod \"multus-g5kbw\" (UID: \"4a7cfe09-9892-494d-a420-5d720afb3df3\") " pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.890642 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4w7km\" (UniqueName: \"kubernetes.io/projected/1d352f75-43f7-4b8c-867e-cfb17bbbe011-kube-api-access-4w7km\") pod \"machine-config-daemon-4bqbm\" (UID: \"1d352f75-43f7-4b8c-867e-cfb17bbbe011\") " pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.900849 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:33Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.912536 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:33Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.925813 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:33Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.935000 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-slrbh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"48d46c05-78b8-4355-9027-77efbbfbe87c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hqcpx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-slrbh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:33Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.946312 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g5kbw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a7cfe09-9892-494d-a420-5d720afb3df3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m86wt\\\",\\\"readOnly\\\":true,\\\"rec
ursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g5kbw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:33Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.958617 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g5kbw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a7cfe09-9892-494d-a420-5d720afb3df3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m86wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g5kbw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:33Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.969434 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:33Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.975568 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-g5kbw" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.982636 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.987190 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mrkdd\" (UniqueName: \"kubernetes.io/projected/8f9e971a-93ce-4a49-a970-a2789486d12c-kube-api-access-mrkdd\") pod \"multus-additional-cni-plugins-rftqz\" (UID: \"8f9e971a-93ce-4a49-a970-a2789486d12c\") " pod="openshift-multus/multus-additional-cni-plugins-rftqz" Jan 31 16:29:33 crc kubenswrapper[4769]: I0131 16:29:33.987633 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f9e971a-93ce-4a49-a970-a2789486d12c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rftqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:33Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:33 crc kubenswrapper[4769]: W0131 16:29:33.996835 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1d352f75_43f7_4b8c_867e_cfb17bbbe011.slice/crio-10aefe8f8850dfe18ab2a2cba1b2653857e8191ebd15e5e7d94d2d42e28f98f3 WatchSource:0}: Error finding container 10aefe8f8850dfe18ab2a2cba1b2653857e8191ebd15e5e7d94d2d42e28f98f3: Status 404 returned error can't find 
the container with id 10aefe8f8850dfe18ab2a2cba1b2653857e8191ebd15e5e7d94d2d42e28f98f3 Jan 31 16:29:33 crc kubenswrapper[4769]: W0131 16:29:33.997438 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4a7cfe09_9892_494d_a420_5d720afb3df3.slice/crio-ab27b92234d50ba2d0bf1d835d06a5bed979b5b6bce169bde0003785c282f794 WatchSource:0}: Error finding container ab27b92234d50ba2d0bf1d835d06a5bed979b5b6bce169bde0003785c282f794: Status 404 returned error can't find the container with id ab27b92234d50ba2d0bf1d835d06a5bed979b5b6bce169bde0003785c282f794 Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.005980 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d352f75-43f7-4b8c-867e-cfb17bbbe011\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-4bqbm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:34Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.020793 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-2r9tc"] Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.021547 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:34Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.021953 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.024077 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.024143 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.024222 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.026524 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.026793 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.026935 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.027426 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.037804 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:34Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.050845 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21fa2db2-f448-487d-9ddb-ba4da28e8ffa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0131 16:29:26.255718 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0131 16:29:26.257930 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3522701725/tls.crt::/tmp/serving-cert-3522701725/tls.key\\\\\\\"\\\\nI0131 16:29:32.222719 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0131 16:29:32.227365 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0131 16:29:32.227391 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0131 16:29:32.227414 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0131 16:29:32.227419 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0131 16:29:32.234321 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0131 16:29:32.234340 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234349 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0131 16:29:32.234352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0131 16:29:32.234355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0131 16:29:32.234357 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0131 16:29:32.234523 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0131 16:29:32.237094 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:34Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.062915 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79dca88fae791047a9091e1fcad57d33d0061bbb52395ed325e2658e5fd6ffc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:34Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.072730 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-slrbh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"48d46c05-78b8-4355-9027-77efbbfbe87c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c68712921d6f140f1028a42cadcda30d38e3a9772ad3ca53e6d7b3b039433b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hqcpx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-slrbh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:34Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.075351 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/86f2019b-d6ca-4e73-9dac-52fe746489cb-ovnkube-config\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.075404 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-var-lib-openvswitch\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.075431 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-host-cni-bin\") pod 
\"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.075448 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-systemd-units\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.075547 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-log-socket\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.075600 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-etc-openvswitch\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.075629 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-run-ovn\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.075649 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-host-cni-netd\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.075669 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.075691 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/86f2019b-d6ca-4e73-9dac-52fe746489cb-ovnkube-script-lib\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.075711 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-run-systemd\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.075767 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-host-run-ovn-kubernetes\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.075789 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-host-kubelet\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.075810 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-host-slash\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.075846 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-run-openvswitch\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.075907 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-node-log\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.075965 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z9jb8\" (UniqueName: \"kubernetes.io/projected/86f2019b-d6ca-4e73-9dac-52fe746489cb-kube-api-access-z9jb8\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.076056 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/86f2019b-d6ca-4e73-9dac-52fe746489cb-env-overrides\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.076095 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/86f2019b-d6ca-4e73-9dac-52fe746489cb-ovn-node-metrics-cert\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.076137 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-host-run-netns\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.086545 4769 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:34Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.103103 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e5e34357e35a0bda20351c442465d7866c927dfad1a6c8acf1dc32b52a326ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb37e108557965f63da49a443c6667212f8ab8eb0a7099a0a106c45e00e78e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:34Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.120416 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"21fa2db2-f448-487d-9ddb-ba4da28e8ffa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0131 16:29:26.255718 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0131 16:29:26.257930 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3522701725/tls.crt::/tmp/serving-cert-3522701725/tls.key\\\\\\\"\\\\nI0131 16:29:32.222719 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0131 16:29:32.227365 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0131 16:29:32.227391 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0131 16:29:32.227414 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0131 16:29:32.227419 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0131 16:29:32.234321 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0131 16:29:32.234340 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234349 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0131 16:29:32.234352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0131 16:29:32.234355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0131 16:29:32.234357 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0131 16:29:32.234523 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0131 16:29:32.237094 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:34Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.134421 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79dca88fae791047a9091e1fcad57d33d0061bbb52395ed325e2658e5fd6ffc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:34Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.149180 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:34Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.177065 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-var-lib-openvswitch\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.177131 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-host-cni-bin\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.177171 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-systemd-units\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.177202 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-log-socket\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.177235 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-etc-openvswitch\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.177272 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-run-ovn\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.177301 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-run-systemd\") pod \"ovnkube-node-2r9tc\" (UID: 
\"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.177330 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-host-cni-netd\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.177360 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.177395 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/86f2019b-d6ca-4e73-9dac-52fe746489cb-ovnkube-script-lib\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.177439 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-host-run-ovn-kubernetes\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.177471 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-host-kubelet\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.177530 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-host-slash\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.177560 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-run-openvswitch\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.177589 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-node-log\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.177619 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z9jb8\" (UniqueName: \"kubernetes.io/projected/86f2019b-d6ca-4e73-9dac-52fe746489cb-kube-api-access-z9jb8\") pod \"ovnkube-node-2r9tc\" (UID: 
\"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.177663 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/86f2019b-d6ca-4e73-9dac-52fe746489cb-env-overrides\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.177694 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/86f2019b-d6ca-4e73-9dac-52fe746489cb-ovn-node-metrics-cert\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.177737 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-host-run-netns\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.177771 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/86f2019b-d6ca-4e73-9dac-52fe746489cb-ovnkube-config\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.178837 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/86f2019b-d6ca-4e73-9dac-52fe746489cb-ovnkube-config\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.178924 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-var-lib-openvswitch\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.178973 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-host-cni-bin\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.179015 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-systemd-units\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.179061 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-log-socket\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc 
kubenswrapper[4769]: I0131 16:29:34.179105 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-etc-openvswitch\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.179151 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-run-ovn\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.179192 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-run-systemd\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.179232 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-host-cni-netd\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.179288 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.180063 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-host-run-ovn-kubernetes\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.180143 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-host-kubelet\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.180197 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-host-slash\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.180240 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-run-openvswitch\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.180306 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" 
(UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-node-log\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.180421 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/86f2019b-d6ca-4e73-9dac-52fe746489cb-ovnkube-script-lib\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.181143 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/86f2019b-d6ca-4e73-9dac-52fe746489cb-env-overrides\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.181219 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-host-run-netns\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.183382 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:34Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.185850 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/86f2019b-d6ca-4e73-9dac-52fe746489cb-ovn-node-metrics-cert\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.208223 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z9jb8\" (UniqueName: \"kubernetes.io/projected/86f2019b-d6ca-4e73-9dac-52fe746489cb-kube-api-access-z9jb8\") pod \"ovnkube-node-2r9tc\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.240030 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:34Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.269104 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-rftqz" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.279110 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.279265 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.279294 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:29:34 crc kubenswrapper[4769]: E0131 16:29:34.279330 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:29:36.279296392 +0000 UTC m=+24.353465121 (durationBeforeRetry 2s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:29:34 crc kubenswrapper[4769]: E0131 16:29:34.279372 4769 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 31 16:29:34 crc kubenswrapper[4769]: E0131 16:29:34.279426 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-31 16:29:36.279412965 +0000 UTC m=+24.353581634 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 31 16:29:34 crc kubenswrapper[4769]: E0131 16:29:34.279564 4769 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 31 16:29:34 crc kubenswrapper[4769]: E0131 16:29:34.279721 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-31 16:29:36.279686462 +0000 UTC m=+24.353855281 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 31 16:29:34 crc kubenswrapper[4769]: W0131 16:29:34.281269 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8f9e971a_93ce_4a49_a970_a2789486d12c.slice/crio-2b0ab1892fd77efcd2c00455367e7ac0ce7a39a1924fe5463b2e8b8815ed7c1e WatchSource:0}: Error finding container 2b0ab1892fd77efcd2c00455367e7ac0ce7a39a1924fe5463b2e8b8815ed7c1e: Status 404 returned error can't find the container with id 2b0ab1892fd77efcd2c00455367e7ac0ce7a39a1924fe5463b2e8b8815ed7c1e Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.282159 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e5e34357e35a0bda20351c442465d7866c927dfad1a6c8acf1dc32b52a326ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb37e108557965f63da49a443c6667212f8ab8eb0a7099a0a106c45e00e78e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identi
ty-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:34Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.319471 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-slrbh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"48d46c05-78b8-4355-9027-77efbbfbe87c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c68712921d6f140f1028a42cadcda30d38e3a9772ad3ca53e6d7b3b039433b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hqcpx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-slrbh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:34Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.367802 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g5kbw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a7cfe09-9892-494d-a420-5d720afb3df3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m86wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g5kbw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:34Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.380249 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.380293 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:29:34 crc kubenswrapper[4769]: E0131 16:29:34.380423 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 31 16:29:34 crc kubenswrapper[4769]: E0131 16:29:34.380438 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 31 16:29:34 crc kubenswrapper[4769]: E0131 16:29:34.380449 4769 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 31 16:29:34 crc kubenswrapper[4769]: E0131 16:29:34.380465 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 31 16:29:34 crc kubenswrapper[4769]: E0131 16:29:34.380539 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 31 16:29:34 crc kubenswrapper[4769]: E0131 16:29:34.380554 4769 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 31 16:29:34 crc kubenswrapper[4769]: E0131 16:29:34.380513 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-31 16:29:36.380485117 +0000 UTC m=+24.454653776 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 31 16:29:34 crc kubenswrapper[4769]: E0131 16:29:34.380659 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-31 16:29:36.380619761 +0000 UTC m=+24.454788430 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.407323 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:34 crc kubenswrapper[4769]: W0131 16:29:34.427207 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod86f2019b_d6ca_4e73_9dac_52fe746489cb.slice/crio-3df7a03ee7c709da23684d8bc4251353a97795fa88b872fcfba9c869f0f93647 WatchSource:0}: Error finding container 3df7a03ee7c709da23684d8bc4251353a97795fa88b872fcfba9c869f0f93647: Status 404 returned error can't find the container with id 3df7a03ee7c709da23684d8bc4251353a97795fa88b872fcfba9c869f0f93647 Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.429250 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:34Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.452718 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f9e971a-93ce-4a49-a970-a2789486d12c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rftqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:34Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.496730 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d352f75-43f7-4b8c-867e-cfb17bbbe011\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-4bqbm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:34Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.545929 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"86f2019b-d6ca-4e73-9dac-52fe746489cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\
\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2r9tc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:34Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.653708 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-01 02:47:00.516614035 +0000 UTC Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.707616 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.707642 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.707771 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:29:34 crc kubenswrapper[4769]: E0131 16:29:34.707845 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:29:34 crc kubenswrapper[4769]: E0131 16:29:34.707966 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:29:34 crc kubenswrapper[4769]: E0131 16:29:34.708290 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.712567 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.713206 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.713921 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.714521 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.715100 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.715635 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.716213 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.716741 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.717315 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.717848 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.718360 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.719008 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.719527 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.720020 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" 
path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.720538 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.721036 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.721599 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.722026 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.725652 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.726236 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.727119 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.727691 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.728098 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.729220 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.729750 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.730870 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.731466 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.732317 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" 
path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.732872 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.733713 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.734188 4769 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.734285 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.736212 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.736737 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.737154 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.738736 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.739673 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.740192 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.741163 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.741803 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.742767 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.743330 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" 
path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.744278 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.744862 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.745662 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.746743 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.747780 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.748825 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.749710 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.750236 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.751893 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.752563 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.754008 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.754843 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.861388 4769 generic.go:334] "Generic (PLEG): container finished" podID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerID="8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85" exitCode=0 Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.861470 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" 
event={"ID":"86f2019b-d6ca-4e73-9dac-52fe746489cb","Type":"ContainerDied","Data":"8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85"} Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.861703 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" event={"ID":"86f2019b-d6ca-4e73-9dac-52fe746489cb","Type":"ContainerStarted","Data":"3df7a03ee7c709da23684d8bc4251353a97795fa88b872fcfba9c869f0f93647"} Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.863637 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" event={"ID":"8f9e971a-93ce-4a49-a970-a2789486d12c","Type":"ContainerStarted","Data":"4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78"} Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.863717 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" event={"ID":"8f9e971a-93ce-4a49-a970-a2789486d12c","Type":"ContainerStarted","Data":"2b0ab1892fd77efcd2c00455367e7ac0ce7a39a1924fe5463b2e8b8815ed7c1e"} Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.865325 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-g5kbw" event={"ID":"4a7cfe09-9892-494d-a420-5d720afb3df3","Type":"ContainerStarted","Data":"e1fb743faaf9717b716d96f7b36096ee11da25ac3eeeb601bdc6d8f20faf3a3f"} Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.865353 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-g5kbw" event={"ID":"4a7cfe09-9892-494d-a420-5d720afb3df3","Type":"ContainerStarted","Data":"ab27b92234d50ba2d0bf1d835d06a5bed979b5b6bce169bde0003785c282f794"} Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.867023 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" event={"ID":"1d352f75-43f7-4b8c-867e-cfb17bbbe011","Type":"ContainerStarted","Data":"e57f0adc59ebcc50644648ee64395f6834328fec384b3d8c83b9758314ea18d8"} Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.867081 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" event={"ID":"1d352f75-43f7-4b8c-867e-cfb17bbbe011","Type":"ContainerStarted","Data":"f48035545fd929a672be1a83a941b13f4b352bdb858af6a412efec46dc7ac217"} Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.867094 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" event={"ID":"1d352f75-43f7-4b8c-867e-cfb17bbbe011","Type":"ContainerStarted","Data":"10aefe8f8850dfe18ab2a2cba1b2653857e8191ebd15e5e7d94d2d42e28f98f3"} Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.873899 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:34Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.891788 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f9e971a-93ce-4a49-a970-a2789486d12c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rftqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:34Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.905315 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d352f75-43f7-4b8c-867e-cfb17bbbe011\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-4bqbm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:34Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.935968 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"86f2019b-d6ca-4e73-9dac-52fe746489cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2r9tc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:34Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.951521 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21fa2db2-f448-487d-9ddb-ba4da28e8ffa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0131 16:29:26.255718 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0131 16:29:26.257930 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3522701725/tls.crt::/tmp/serving-cert-3522701725/tls.key\\\\\\\"\\\\nI0131 16:29:32.222719 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0131 16:29:32.227365 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0131 16:29:32.227391 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0131 16:29:32.227414 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0131 16:29:32.227419 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0131 16:29:32.234321 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0131 16:29:32.234340 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234349 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0131 16:29:32.234352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0131 16:29:32.234355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0131 16:29:32.234357 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0131 16:29:32.234523 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0131 16:29:32.237094 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:34Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.967590 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79dca88fae791047a9091e1fcad57d33d0061bbb52395ed325e2658e5fd6ffc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:34Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.979438 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:34Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:34 crc kubenswrapper[4769]: I0131 16:29:34.994206 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:34Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:35 crc kubenswrapper[4769]: I0131 16:29:35.010960 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:35Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:35 crc kubenswrapper[4769]: I0131 16:29:35.026920 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e5e34357e35a0bda20351c442465d7866c927dfad1a6c8acf1dc32b52a326ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb37e108557965f63da49a443c6667212f8ab8eb0a7099a0a106c45e00e78e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:35Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:35 crc kubenswrapper[4769]: I0131 16:29:35.039564 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-slrbh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"48d46c05-78b8-4355-9027-77efbbfbe87c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c68712921d6f140f1028a42cadcda30d38e3a9772ad3ca53e6d7b3b039433b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hqcpx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-slrbh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:35Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:35 crc kubenswrapper[4769]: I0131 16:29:35.055228 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g5kbw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a7cfe09-9892-494d-a420-5d720afb3df3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m86wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g5kbw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:35Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:35 crc kubenswrapper[4769]: I0131 16:29:35.076647 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g5kbw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a7cfe09-9892-494d-a420-5d720afb3df3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fb743faaf9717b716d96f7b36096ee11da25ac3eeeb601bdc6d8f20faf3a3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m86wt\\\",\\\"readOnly\\\":true,\\\"recursi
veReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g5kbw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:35Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:35 crc kubenswrapper[4769]: I0131 16:29:35.095800 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f9e971a-93ce-4a49-a970-a2789486d12c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"}
,{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rftqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:35Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:35 crc kubenswrapper[4769]: I0131 16:29:35.125237 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d352f75-43f7-4b8c-867e-cfb17bbbe011\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e57f0adc59ebcc50644648ee64395f6834328fec384b3d8c83b9758314ea18d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48035545fd929a672be1a83a941b13f4b352bdb858af6a412efec46dc7ac217\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-4bqbm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:35Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:35 crc kubenswrapper[4769]: I0131 16:29:35.172726 4769 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86f2019b-d6ca-4e73-9dac-52fe746489cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",
\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17
b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2r9tc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:35Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:35 crc kubenswrapper[4769]: I0131 16:29:35.228522 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:35Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:35 crc kubenswrapper[4769]: I0131 16:29:35.246256 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79dca88fae791047a9091e1fcad57d33d0061bbb52395ed325e2658e5fd6ffc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:35Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:35 crc kubenswrapper[4769]: I0131 16:29:35.281370 4769 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:35Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:35 crc kubenswrapper[4769]: I0131 16:29:35.282293 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 31 16:29:35 crc kubenswrapper[4769]: I0131 16:29:35.285613 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 31 16:29:35 crc kubenswrapper[4769]: I0131 16:29:35.320640 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Jan 31 16:29:35 crc kubenswrapper[4769]: I0131 16:29:35.339413 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:35Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:35 crc kubenswrapper[4769]: I0131 16:29:35.382827 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21fa2db2-f448-487d-9ddb-ba4da28e8ffa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0131 16:29:26.255718 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0131 16:29:26.257930 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3522701725/tls.crt::/tmp/serving-cert-3522701725/tls.key\\\\\\\"\\\\nI0131 16:29:32.222719 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0131 16:29:32.227365 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0131 16:29:32.227391 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0131 16:29:32.227414 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0131 16:29:32.227419 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0131 16:29:32.234321 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0131 16:29:32.234340 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234349 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0131 16:29:32.234352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0131 16:29:32.234355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0131 16:29:32.234357 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0131 16:29:32.234523 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0131 16:29:32.237094 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:35Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:35 crc kubenswrapper[4769]: I0131 16:29:35.418554 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:35Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:35 crc kubenswrapper[4769]: I0131 16:29:35.458867 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e5e34357e35a0bda20351c442465d7866c927dfad1a6c8acf1dc32b52a326ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb37e108557965f63da49a443c6667212f8ab8eb0a7099a0a106c45e00e78e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:35Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:35 crc kubenswrapper[4769]: I0131 16:29:35.496363 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-slrbh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"48d46c05-78b8-4355-9027-77efbbfbe87c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c68712921d6f140f1028a42cadcda30d38e3a9772ad3ca53e6d7b3b039433b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hqcpx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-slrbh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:35Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:35 crc kubenswrapper[4769]: I0131 16:29:35.538634 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79dca88fae791047a9091e1fcad57d33d0061bbb52395ed325e2658e5fd6ffc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:35Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:35 crc kubenswrapper[4769]: I0131 16:29:35.580278 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:35Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:35 crc kubenswrapper[4769]: I0131 16:29:35.627984 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:35Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:35 crc kubenswrapper[4769]: I0131 16:29:35.654713 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-21 13:10:50.241254895 +0000 UTC Jan 31 16:29:35 crc kubenswrapper[4769]: I0131 16:29:35.665666 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21fa2db2-f448-487d-9ddb-ba4da28e8ffa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0131 16:29:26.255718 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0131 16:29:26.257930 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3522701725/tls.crt::/tmp/serving-cert-3522701725/tls.key\\\\\\\"\\\\nI0131 16:29:32.222719 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0131 16:29:32.227365 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0131 16:29:32.227391 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0131 16:29:32.227414 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0131 16:29:32.227419 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0131 16:29:32.234321 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0131 16:29:32.234340 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234349 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0131 16:29:32.234352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0131 16:29:32.234355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0131 16:29:32.234357 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0131 16:29:32.234523 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0131 16:29:32.237094 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:35Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:35 crc kubenswrapper[4769]: I0131 16:29:35.705336 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:35Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:35 crc kubenswrapper[4769]: I0131 16:29:35.747139 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e5e34357e35a0bda20351c442465d7866c927dfad1a6c8acf1dc32b52a326ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb37e108557965f63da49a443c6667212f8ab8eb0a7099a0a106c45e00e78e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:35Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:35 crc kubenswrapper[4769]: I0131 16:29:35.781406 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-slrbh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"48d46c05-78b8-4355-9027-77efbbfbe87c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c68712921d6f140f1028a42cadcda30d38e3a9772ad3ca53e6d7b3b039433b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hqcpx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-slrbh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:35Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:35 crc kubenswrapper[4769]: I0131 16:29:35.823388 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb8dd1c1-8ad0-4df1-9eb7-f7e36509abac\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8f8d35bedc1d437cf45b14a416b9e5ed1610d7ecd30603f9a179406839220a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9c6bf8a15857982b68a045b8ca1d407a63ad3da192375596f26389d9983cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1712a54b482696918a3ddb7294c16ce3676cb56c2928b721be12bcd390085e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f22b718ef3d2b16061b2c0fe48c8fe612b3e2b63baa2925fce7a1ad9552b090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:35Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:35 crc kubenswrapper[4769]: I0131 16:29:35.863160 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g5kbw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a7cfe09-9892-494d-a420-5d720afb3df3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fb743faaf9717b716d96f7b36096ee11da25ac3eeeb601bdc6d8f20faf3a3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run
/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m86wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g5kbw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:35Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:35 crc kubenswrapper[4769]: I0131 16:29:35.872207 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"ef20199c8b7874b9d34bc38aed75d3d0dc669db3dfaff3218a1a4e249330daca"} Jan 31 16:29:35 crc kubenswrapper[4769]: I0131 16:29:35.877427 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" event={"ID":"86f2019b-d6ca-4e73-9dac-52fe746489cb","Type":"ContainerStarted","Data":"81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e"} Jan 31 16:29:35 crc kubenswrapper[4769]: I0131 16:29:35.877468 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" event={"ID":"86f2019b-d6ca-4e73-9dac-52fe746489cb","Type":"ContainerStarted","Data":"0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8"} Jan 31 16:29:35 crc kubenswrapper[4769]: I0131 16:29:35.877483 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" event={"ID":"86f2019b-d6ca-4e73-9dac-52fe746489cb","Type":"ContainerStarted","Data":"7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3"} Jan 31 16:29:35 crc kubenswrapper[4769]: I0131 16:29:35.877519 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" event={"ID":"86f2019b-d6ca-4e73-9dac-52fe746489cb","Type":"ContainerStarted","Data":"ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6"} Jan 31 16:29:35 crc kubenswrapper[4769]: I0131 16:29:35.877533 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" event={"ID":"86f2019b-d6ca-4e73-9dac-52fe746489cb","Type":"ContainerStarted","Data":"e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67"} Jan 31 16:29:35 crc kubenswrapper[4769]: I0131 16:29:35.879110 4769 generic.go:334] "Generic (PLEG): container finished" podID="8f9e971a-93ce-4a49-a970-a2789486d12c" containerID="4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78" exitCode=0 Jan 31 16:29:35 crc kubenswrapper[4769]: I0131 16:29:35.879231 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" event={"ID":"8f9e971a-93ce-4a49-a970-a2789486d12c","Type":"ContainerDied","Data":"4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78"} Jan 31 16:29:35 crc kubenswrapper[4769]: I0131 16:29:35.909611 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f9e971a-93ce-4a49-a970-a2789486d12c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"}
,{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rftqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:35Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:35 crc kubenswrapper[4769]: I0131 16:29:35.944075 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d352f75-43f7-4b8c-867e-cfb17bbbe011\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e57f0adc59ebcc50644648ee64395f6834328fec384b3d8c83b9758314ea18d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48035545fd929a672be1a83a941b13f4b352bdb858af6a412efec46dc7ac217\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-4bqbm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:35Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:35 crc kubenswrapper[4769]: I0131 16:29:35.988526 4769 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86f2019b-d6ca-4e73-9dac-52fe746489cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",
\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17
b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2r9tc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:35Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:36 crc kubenswrapper[4769]: I0131 16:29:36.021528 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:36Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:36 crc kubenswrapper[4769]: I0131 16:29:36.066526 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21fa2db2-f448-487d-9ddb-ba4da28e8ffa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0131 16:29:26.255718 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0131 16:29:26.257930 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3522701725/tls.crt::/tmp/serving-cert-3522701725/tls.key\\\\\\\"\\\\nI0131 16:29:32.222719 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0131 16:29:32.227365 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0131 16:29:32.227391 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0131 16:29:32.227414 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0131 16:29:32.227419 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0131 16:29:32.234321 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0131 16:29:32.234340 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234349 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0131 16:29:32.234352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0131 16:29:32.234355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0131 16:29:32.234357 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0131 16:29:32.234523 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0131 16:29:32.237094 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:36Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:36 crc kubenswrapper[4769]: I0131 16:29:36.102886 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79dca88fae791047a9091e1fcad57d33d0061bbb52395ed325e2658e5fd6ffc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:36Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:36 crc kubenswrapper[4769]: I0131 16:29:36.141247 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:36Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:36 crc kubenswrapper[4769]: I0131 16:29:36.180178 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef20199c8b7874b9d34bc38aed75d3d0dc669db3dfaff3218a1a4e249330daca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:36Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:36 crc kubenswrapper[4769]: I0131 16:29:36.218770 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:36Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:36 crc kubenswrapper[4769]: I0131 16:29:36.265040 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e5e34357e35a0bda20351c442465d7866c927dfad1a6c8acf1dc32b52a326ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb37e108557965f63da49a443c6667212f8ab8eb0a7099a0a106c45e00e78e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:36Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:36 crc kubenswrapper[4769]: I0131 16:29:36.299807 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-slrbh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"48d46c05-78b8-4355-9027-77efbbfbe87c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c68712921d6f140f1028a42cadcda30d38e3a9772ad3ca53e6d7b3b039433b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hqcpx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-slrbh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:36Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:36 crc kubenswrapper[4769]: I0131 16:29:36.299874 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:29:36 crc kubenswrapper[4769]: E0131 16:29:36.299983 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:29:40.299957158 +0000 UTC m=+28.374125847 (durationBeforeRetry 4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:29:36 crc kubenswrapper[4769]: I0131 16:29:36.300285 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:29:36 crc kubenswrapper[4769]: I0131 16:29:36.300367 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:29:36 crc kubenswrapper[4769]: E0131 16:29:36.300472 4769 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 31 16:29:36 crc kubenswrapper[4769]: E0131 16:29:36.300522 4769 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 31 16:29:36 crc kubenswrapper[4769]: E0131 16:29:36.300570 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-31 16:29:40.300556935 +0000 UTC m=+28.374725624 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 31 16:29:36 crc kubenswrapper[4769]: E0131 16:29:36.300605 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-31 16:29:40.300587035 +0000 UTC m=+28.374755704 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 31 16:29:36 crc kubenswrapper[4769]: I0131 16:29:36.348392 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb8dd1c1-8ad0-4df1-9eb7-f7e36509abac\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8f8d35bedc1d437cf45b14a416b9e5ed1610d7ecd30603f9a179406839220a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9c6bf8a15857982b68a045b8ca1d407a63ad3da192375596f26389d9983cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1712a54b482696918a3ddb7294c16ce3676cb56c2928b721be12bcd390085e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\"
:\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f22b718ef3d2b16061b2c0fe48c8fe612b3e2b63baa2925fce7a1ad9552b090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:36Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:36 crc kubenswrapper[4769]: I0131 16:29:36.383973 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g5kbw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a7cfe09-9892-494d-a420-5d720afb3df3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fb743faaf9717b716d96f7b36096ee11da25ac3eeeb601bdc6d8f20faf3a3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m86wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g5kbw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:36Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:36 crc kubenswrapper[4769]: I0131 16:29:36.401575 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:29:36 crc kubenswrapper[4769]: I0131 16:29:36.401706 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:29:36 crc kubenswrapper[4769]: E0131 16:29:36.401839 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 31 16:29:36 crc kubenswrapper[4769]: E0131 16:29:36.401881 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 31 16:29:36 crc kubenswrapper[4769]: E0131 16:29:36.401891 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 31 16:29:36 crc kubenswrapper[4769]: E0131 16:29:36.401921 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 31 16:29:36 crc kubenswrapper[4769]: E0131 16:29:36.401941 4769 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 31 16:29:36 crc kubenswrapper[4769]: E0131 16:29:36.401898 4769 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 31 16:29:36 crc kubenswrapper[4769]: E0131 16:29:36.402010 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-31 16:29:40.401988168 +0000 UTC m=+28.476156867 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 31 16:29:36 crc kubenswrapper[4769]: E0131 16:29:36.402069 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-31 16:29:40.40204692 +0000 UTC m=+28.476215789 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 31 16:29:36 crc kubenswrapper[4769]: I0131 16:29:36.419268 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:36Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:36 crc kubenswrapper[4769]: I0131 16:29:36.469703 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f9e971a-93ce-4a49-a970-a2789486d12c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rftqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:36Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:36 crc 
kubenswrapper[4769]: I0131 16:29:36.500760 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d352f75-43f7-4b8c-867e-cfb17bbbe011\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e57f0adc59ebcc50644648ee64395f6834328fec384b3d8c83b9758314ea18d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48035545fd929a672be1a83a941b13f4b352bdb858af6a412efec46dc7ac217\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-4bqbm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-01-31T16:29:36Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:36 crc kubenswrapper[4769]: I0131 16:29:36.549231 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86f2019b-d6ca-4e73-9dac-52fe746489cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",
\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\
\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\
\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2r9tc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:36Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:36 crc kubenswrapper[4769]: I0131 16:29:36.655180 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-13 17:33:44.995227967 +0000 UTC Jan 31 16:29:36 crc kubenswrapper[4769]: I0131 16:29:36.707928 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:29:36 crc kubenswrapper[4769]: I0131 16:29:36.708064 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:29:36 crc kubenswrapper[4769]: I0131 16:29:36.708136 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:29:36 crc kubenswrapper[4769]: E0131 16:29:36.708090 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:29:36 crc kubenswrapper[4769]: E0131 16:29:36.708322 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:29:36 crc kubenswrapper[4769]: E0131 16:29:36.708461 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:29:36 crc kubenswrapper[4769]: I0131 16:29:36.887155 4769 generic.go:334] "Generic (PLEG): container finished" podID="8f9e971a-93ce-4a49-a970-a2789486d12c" containerID="055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880" exitCode=0 Jan 31 16:29:36 crc kubenswrapper[4769]: I0131 16:29:36.887545 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" event={"ID":"8f9e971a-93ce-4a49-a970-a2789486d12c","Type":"ContainerDied","Data":"055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880"} Jan 31 16:29:36 crc kubenswrapper[4769]: I0131 16:29:36.892400 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" event={"ID":"86f2019b-d6ca-4e73-9dac-52fe746489cb","Type":"ContainerStarted","Data":"a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd"} Jan 31 16:29:36 crc kubenswrapper[4769]: I0131 16:29:36.905682 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb8dd1c1-8ad0-4df1-9eb7-f7e36509abac\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8f8d35bedc1d437cf45b14a416b9e5ed1610d7ecd30603f9a179406839220a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9c6bf8a15857982b68a045b8ca1d407a63ad3da192375596f26389d9983cc\\\",\\\"image\\\":\\\"quay.io/openshift-relea
se-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1712a54b482696918a3ddb7294c16ce3676cb56c2928b721be12bcd390085e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f22b718ef3d2b16061b2c0fe48c8fe612b3e2b63baa2925fce7a1ad9552b090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:36Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:36 crc kubenswrapper[4769]: I0131 16:29:36.922313 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g5kbw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a7cfe09-9892-494d-a420-5d720afb3df3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fb743faaf9717b716d96f7b36096ee11da25ac3eeeb601bdc6d8f20faf3a3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m86wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g5kbw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:36Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:36 crc kubenswrapper[4769]: I0131 16:29:36.935691 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:36Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:36 crc kubenswrapper[4769]: I0131 16:29:36.951035 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f9e971a-93ce-4a49-a970-a2789486d12c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"image\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"moun
tPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rftqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:36Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:36 crc kubenswrapper[4769]: I0131 16:29:36.964958 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d352f75-43f7-4b8c-867e-cfb17bbbe011\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e57f0adc59ebcc50644648ee64395f6834328fec384b3d8c83b9758314ea18d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48035545fd929a672be1a83a941b13f4b352bdb858af6a412efec46dc7ac217\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-4bqbm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:36Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:36 crc kubenswrapper[4769]: I0131 16:29:36.993940 4769 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86f2019b-d6ca-4e73-9dac-52fe746489cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",
\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17
b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2r9tc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:36Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:37 crc kubenswrapper[4769]: I0131 16:29:37.009749 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21fa2db2-f448-487d-9ddb-ba4da28e8ffa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0131 16:29:26.255718 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0131 16:29:26.257930 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3522701725/tls.crt::/tmp/serving-cert-3522701725/tls.key\\\\\\\"\\\\nI0131 16:29:32.222719 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0131 16:29:32.227365 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0131 16:29:32.227391 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0131 16:29:32.227414 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0131 16:29:32.227419 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0131 16:29:32.234321 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0131 16:29:32.234340 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234349 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0131 16:29:32.234352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0131 16:29:32.234355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0131 16:29:32.234357 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0131 16:29:32.234523 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0131 16:29:32.237094 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:37Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:37 crc kubenswrapper[4769]: I0131 16:29:37.028921 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79dca88fae791047a9091e1fcad57d33d0061bbb52395ed325e2658e5fd6ffc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:37Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:37 crc kubenswrapper[4769]: I0131 16:29:37.043424 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:37Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:37 crc kubenswrapper[4769]: I0131 16:29:37.056288 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef20199c8b7874b9d34bc38aed75d3d0dc669db3dfaff3218a1a4e249330daca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:37Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:37 crc kubenswrapper[4769]: I0131 16:29:37.068298 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:37Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:37 crc kubenswrapper[4769]: I0131 16:29:37.083476 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e5e34357e35a0bda20351c442465d7866c927dfad1a6c8acf1dc32b52a326ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb37e108557965f63da49a443c6667212f8ab8eb0a7099a0a106c45e00e78e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:37Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:37 crc kubenswrapper[4769]: I0131 16:29:37.099581 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-slrbh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"48d46c05-78b8-4355-9027-77efbbfbe87c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c68712921d6f140f1028a42cadcda30d38e3a9772ad3ca53e6d7b3b039433b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hqcpx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-slrbh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:37Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:37 crc kubenswrapper[4769]: I0131 16:29:37.656157 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-14 03:01:01.192037914 +0000 UTC Jan 31 16:29:37 crc kubenswrapper[4769]: I0131 16:29:37.897438 4769 generic.go:334] "Generic (PLEG): container finished" podID="8f9e971a-93ce-4a49-a970-a2789486d12c" containerID="edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651" exitCode=0 Jan 31 16:29:37 crc kubenswrapper[4769]: I0131 16:29:37.897482 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" event={"ID":"8f9e971a-93ce-4a49-a970-a2789486d12c","Type":"ContainerDied","Data":"edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651"} Jan 31 16:29:37 crc kubenswrapper[4769]: I0131 16:29:37.911586 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:37Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:37 crc kubenswrapper[4769]: I0131 16:29:37.932291 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e5e34357e35a0bda20351c442465d7866c927dfad1a6c8acf1dc32b52a326ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb37e108557965f63da49a443c6667212f8ab8eb0a7099a0a106c45e00e78e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:37Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:37 crc kubenswrapper[4769]: I0131 16:29:37.944897 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-slrbh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"48d46c05-78b8-4355-9027-77efbbfbe87c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c68712921d6f140f1028a42cadcda30d38e3a9772ad3ca53e6d7b3b039433b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hqcpx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-slrbh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:37Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:37 crc kubenswrapper[4769]: I0131 16:29:37.962245 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb8dd1c1-8ad0-4df1-9eb7-f7e36509abac\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8f8d35bedc1d437cf45b14a416b9e5ed1610d7ecd30603f9a179406839220a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9c6bf8a15857982b68a045b8ca1d407a63ad3da192375596f26389d9983cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1712a54b482696918a3ddb7294c16ce3676cb56c2928b721be12bcd390085e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f22b718ef3d2b16061b2c0fe48c8fe612b3e2b63baa2925fce7a1ad9552b090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:37Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:37 crc kubenswrapper[4769]: I0131 16:29:37.976612 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g5kbw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a7cfe09-9892-494d-a420-5d720afb3df3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fb743faaf9717b716d96f7b36096ee11da25ac3eeeb601bdc6d8f20faf3a3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run
/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m86wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g5kbw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:37Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:37 crc kubenswrapper[4769]: I0131 16:29:37.993969 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:37Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.011992 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f9e971a-93ce-4a49-a970-a2789486d12c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rftqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:38Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.022741 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d352f75-43f7-4b8c-867e-cfb17bbbe011\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e57f0adc59ebcc50644648ee64395f6834328fec384b3d8c83b9758314ea18d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48035545fd929a672be1a83a941b13f4b352bdb858af6a412efec46dc7ac217\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"202
6-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-4bqbm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:38Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.042420 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86f2019b-d6ca-4e73-9dac-52fe746489cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2r9tc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:38Z 
is after 2025-08-24T17:21:41Z" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.060399 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21fa2db2-f448-487d-9ddb-ba4da28e8ffa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\
\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0131 16:29:26.255718 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0131 16:29:26.257930 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3522701725/tls.crt::/tmp/serving-cert-3522701725/tls.key\\\\\\\"\\\\nI0131 16:29:32.222719 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0131 16:29:32.227365 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0131 16:29:32.227391 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0131 16:29:32.227414 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0131 16:29:32.227419 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0131 16:29:32.234321 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0131 16:29:32.234340 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234349 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0131 16:29:32.234352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0131 16:29:32.234355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0131 16:29:32.234357 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0131 16:29:32.234523 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0131 16:29:32.237094 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:38Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.074297 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79dca88fae791047a9091e1fcad57d33d0061bbb52395ed325e2658e5fd6ffc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:38Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.088949 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:38Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.104487 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef20199c8b7874b9d34bc38aed75d3d0dc669db3dfaff3218a1a4e249330daca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:38Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.208933 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-lw4fx"] Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.209336 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-lw4fx" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.211700 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.211760 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.212178 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.212335 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.224704 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g5kbw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a7cfe09-9892-494d-a420-5d720afb3df3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fb743faaf9717b716d96f7b36096ee11da25ac3eeeb601bdc6d8f20faf3a3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\
\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m86wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g5kbw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:38Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.237382 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb8dd1c1-8ad0-4df1-9eb7-f7e36509abac\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8f8d35bedc1d437cf45b14a416b9e5ed1610d7ecd30603f9a179406839220a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9c6bf8a15857982b68a045b8ca1d407a63ad3da192375596f26389d9983cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\
"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1712a54b482696918a3ddb7294c16ce3676cb56c2928b721be12bcd390085e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f22b718ef3d2b16061b2c0fe48c8fe612b3e2b63baa2925fce7a1ad9552b090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:38Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.247579 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d352f75-43f7-4b8c-867e-cfb17bbbe011\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e57f0adc59ebcc50644648ee64395f6834328fec384b3d8c83b9758314ea18d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48035545fd929a672be1a83a941b13f4b352bdb858af6a412efec46dc7ac217\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-4bqbm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:38Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.274201 4769 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86f2019b-d6ca-4e73-9dac-52fe746489cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",
\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17
b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2r9tc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:38Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.288307 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:38Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.309414 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f9e971a-93ce-4a49-a970-a2789486d12c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rftqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:38Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.321218 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6q9l7\" (UniqueName: \"kubernetes.io/projected/97c97bcd-bd44-4fdf-a90a-2d6be88c23e4-kube-api-access-6q9l7\") pod \"node-ca-lw4fx\" (UID: \"97c97bcd-bd44-4fdf-a90a-2d6be88c23e4\") " pod="openshift-image-registry/node-ca-lw4fx" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.321257 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/97c97bcd-bd44-4fdf-a90a-2d6be88c23e4-serviceca\") pod \"node-ca-lw4fx\" (UID: \"97c97bcd-bd44-4fdf-a90a-2d6be88c23e4\") " pod="openshift-image-registry/node-ca-lw4fx" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.321317 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/97c97bcd-bd44-4fdf-a90a-2d6be88c23e4-host\") pod \"node-ca-lw4fx\" (UID: \"97c97bcd-bd44-4fdf-a90a-2d6be88c23e4\") " pod="openshift-image-registry/node-ca-lw4fx" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.324820 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79dca88fae791047a9091e1fcad57d33d0061bbb52395ed325e2658e5fd6ffc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:38Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.337782 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:38Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.349564 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef20199c8b7874b9d34bc38aed75d3d0dc669db3dfaff3218a1a4e249330daca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:38Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.361095 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lw4fx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"97c97bcd-bd44-4fdf-a90a-2d6be88c23e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6q9l7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lw4fx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:38Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.381073 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21fa2db2-f448-487d-9ddb-ba4da28e8ffa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0131 16:29:26.255718 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0131 16:29:26.257930 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3522701725/tls.crt::/tmp/serving-cert-3522701725/tls.key\\\\\\\"\\\\nI0131 16:29:32.222719 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0131 16:29:32.227365 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0131 16:29:32.227391 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0131 16:29:32.227414 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0131 16:29:32.227419 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0131 16:29:32.234321 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0131 16:29:32.234340 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234349 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0131 16:29:32.234352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0131 16:29:32.234355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0131 16:29:32.234357 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0131 16:29:32.234523 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0131 16:29:32.237094 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:38Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.402628 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e5e34357e35a0bda20351c442465d7866c927dfad1a6c8acf1dc32b52a326ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb37e108557965f63da49a443c6667212f8ab8eb0a7099a0a106c45e00e78e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:38Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.415364 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-slrbh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"48d46c05-78b8-4355-9027-77efbbfbe87c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c68712921d6f140f1028a42cadcda30d38e3a9772ad3ca53e6d7b3b039433b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hqcpx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-slrbh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:38Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.422890 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6q9l7\" (UniqueName: \"kubernetes.io/projected/97c97bcd-bd44-4fdf-a90a-2d6be88c23e4-kube-api-access-6q9l7\") pod \"node-ca-lw4fx\" (UID: \"97c97bcd-bd44-4fdf-a90a-2d6be88c23e4\") " pod="openshift-image-registry/node-ca-lw4fx" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.422964 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/97c97bcd-bd44-4fdf-a90a-2d6be88c23e4-serviceca\") pod \"node-ca-lw4fx\" (UID: \"97c97bcd-bd44-4fdf-a90a-2d6be88c23e4\") " pod="openshift-image-registry/node-ca-lw4fx" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.423073 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/97c97bcd-bd44-4fdf-a90a-2d6be88c23e4-host\") pod \"node-ca-lw4fx\" (UID: \"97c97bcd-bd44-4fdf-a90a-2d6be88c23e4\") " 
pod="openshift-image-registry/node-ca-lw4fx" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.423168 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/97c97bcd-bd44-4fdf-a90a-2d6be88c23e4-host\") pod \"node-ca-lw4fx\" (UID: \"97c97bcd-bd44-4fdf-a90a-2d6be88c23e4\") " pod="openshift-image-registry/node-ca-lw4fx" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.426665 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/97c97bcd-bd44-4fdf-a90a-2d6be88c23e4-serviceca\") pod \"node-ca-lw4fx\" (UID: \"97c97bcd-bd44-4fdf-a90a-2d6be88c23e4\") " pod="openshift-image-registry/node-ca-lw4fx" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.429488 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:38Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.458296 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6q9l7\" (UniqueName: \"kubernetes.io/projected/97c97bcd-bd44-4fdf-a90a-2d6be88c23e4-kube-api-access-6q9l7\") pod \"node-ca-lw4fx\" (UID: \"97c97bcd-bd44-4fdf-a90a-2d6be88c23e4\") " pod="openshift-image-registry/node-ca-lw4fx" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.522819 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-lw4fx" Jan 31 16:29:38 crc kubenswrapper[4769]: W0131 16:29:38.543387 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod97c97bcd_bd44_4fdf_a90a_2d6be88c23e4.slice/crio-f58a480d20224cadda3b410aef96d3b5d559b14ee3bb3e73f0f818c7f71f336a WatchSource:0}: Error finding container f58a480d20224cadda3b410aef96d3b5d559b14ee3bb3e73f0f818c7f71f336a: Status 404 returned error can't find the container with id f58a480d20224cadda3b410aef96d3b5d559b14ee3bb3e73f0f818c7f71f336a Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.623091 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.626307 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.626373 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.626385 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.626441 4769 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.636777 4769 kubelet_node_status.go:115] "Node was previously registered" node="crc" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.637133 4769 kubelet_node_status.go:79] "Successfully registered node" node="crc" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.638556 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.638605 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.638623 4769 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.638646 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.638663 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:38Z","lastTransitionTime":"2026-01-31T16:29:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:38 crc kubenswrapper[4769]: E0131 16:29:38.654755 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a4704f7-ede0-4833-ba79-415de5d798cc\\\",\\\"systemUUID\\\":\\\"e3275d1e-5ae6-4e54-b0fa-71e35cbe4ac0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:38Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.656671 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-24 06:12:58.904524283 +0000 UTC Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.659389 4769 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.659439 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.659456 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.659480 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.659529 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:38Z","lastTransitionTime":"2026-01-31T16:29:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 31 16:29:38 crc kubenswrapper[4769]: E0131 16:29:38.672863 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status ... for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:38Z is after 2025-08-24T17:21:41Z"
Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.676135 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.676183 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
event="NodeHasNoDiskPressure" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.676201 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.676226 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.676243 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:38Z","lastTransitionTime":"2026-01-31T16:29:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:38 crc kubenswrapper[4769]: E0131 16:29:38.694013 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a4704f7-ede0-4833-ba79-415de5d798cc\\\",\\\"systemUUID\\\":\\\"e3275d1e-5ae6-4e54-b0fa-71e35cbe4ac0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:38Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.698476 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.698534 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.698548 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.698568 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.698584 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:38Z","lastTransitionTime":"2026-01-31T16:29:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.707458 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.707553 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:29:38 crc kubenswrapper[4769]: E0131 16:29:38.707640 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.707467 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:29:38 crc kubenswrapper[4769]: E0131 16:29:38.707771 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:29:38 crc kubenswrapper[4769]: E0131 16:29:38.707835 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:29:38 crc kubenswrapper[4769]: E0131 16:29:38.712159 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a4704f7-ede0-4833-ba79-415de5d798cc\\\",\\\"systemUUID\\\":\\\"e3275d1e-5ae6-4e54-b0fa-71e35cbe4ac0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:38Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.716748 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.716844 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.716874 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.716912 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.716941 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:38Z","lastTransitionTime":"2026-01-31T16:29:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:38 crc kubenswrapper[4769]: E0131 16:29:38.730947 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a4704f7-ede0-4833-ba79-415de5d798cc\\\",\\\"systemUUID\\\":\\\"e3275d1e-5ae6-4e54-b0fa-71e35cbe4ac0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:38Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:38 crc kubenswrapper[4769]: E0131 16:29:38.731127 4769 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.734474 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.734540 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.734558 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.734581 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.734599 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:38Z","lastTransitionTime":"2026-01-31T16:29:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.837357 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.837398 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.837408 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.837424 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.837433 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:38Z","lastTransitionTime":"2026-01-31T16:29:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.904519 4769 generic.go:334] "Generic (PLEG): container finished" podID="8f9e971a-93ce-4a49-a970-a2789486d12c" containerID="3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f" exitCode=0 Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.904596 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" event={"ID":"8f9e971a-93ce-4a49-a970-a2789486d12c","Type":"ContainerDied","Data":"3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f"} Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.911889 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" event={"ID":"86f2019b-d6ca-4e73-9dac-52fe746489cb","Type":"ContainerStarted","Data":"e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f"} Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.913784 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-lw4fx" event={"ID":"97c97bcd-bd44-4fdf-a90a-2d6be88c23e4","Type":"ContainerStarted","Data":"f603aa4ef9fa0a10f90946e243cd0d25dcd6dba58c86a4378085addf26d95233"} Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.913827 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-lw4fx" event={"ID":"97c97bcd-bd44-4fdf-a90a-2d6be88c23e4","Type":"ContainerStarted","Data":"f58a480d20224cadda3b410aef96d3b5d559b14ee3bb3e73f0f818c7f71f336a"} Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.923010 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f9e971a-93ce-4a49-a970-a2789486d12c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rftqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:38Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.937433 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d352f75-43f7-4b8c-867e-cfb17bbbe011\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e57f0adc59ebcc50644648ee64395f6834328fec384b3d8c83b9758314ea18d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48035545fd929a672be1a83a941b13f4b352bdb858af6a412efec46dc7ac217\\\",\\\"image\\\":\\\"quay.io/openshift-re
lease-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-4bqbm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:38Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.940708 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.940746 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.940756 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.940772 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.940781 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:38Z","lastTransitionTime":"2026-01-31T16:29:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.961684 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86f2019b-d6ca-4e73-9dac-52fe746489cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda501
5c7adcb277ef85\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2r9tc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:38Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.976341 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:38Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:38 crc kubenswrapper[4769]: I0131 16:29:38.994188 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79dca88fae791047a9091e1fcad57d33d0061bbb52395ed325e2658e5fd6ffc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:38Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.013069 4769 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:39Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.026699 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef20199c8b7874b9d34bc38aed75d3d0dc669db3dfaff3218a1a4e249330daca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:39Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.037577 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lw4fx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"97c97bcd-bd44-4fdf-a90a-2d6be88c23e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6q9l7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lw4fx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:39Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.042845 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.042889 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.042901 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.042917 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.042927 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:39Z","lastTransitionTime":"2026-01-31T16:29:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.051037 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21fa2db2-f448-487d-9ddb-ba4da28e8ffa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0131 16:29:26.255718 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0131 16:29:26.257930 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3522701725/tls.crt::/tmp/serving-cert-3522701725/tls.key\\\\\\\"\\\\nI0131 16:29:32.222719 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0131 16:29:32.227365 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0131 16:29:32.227391 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0131 16:29:32.227414 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0131 16:29:32.227419 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0131 16:29:32.234321 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0131 16:29:32.234340 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234349 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0131 16:29:32.234352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0131 16:29:32.234355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0131 16:29:32.234357 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0131 16:29:32.234523 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0131 16:29:32.237094 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:39Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.067101 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:39Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.086966 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e5e34357e35a0bda20351c442465d7866c927dfad1a6c8acf1dc32b52a326ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb37e108557965f63da49a443c6667212f8ab8eb0a7099a0a106c45e00e78e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:39Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.099293 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-slrbh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"48d46c05-78b8-4355-9027-77efbbfbe87c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c68712921d6f140f1028a42cadcda30d38e3a9772ad3ca53e6d7b3b039433b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hqcpx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-slrbh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:39Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.117526 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb8dd1c1-8ad0-4df1-9eb7-f7e36509abac\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8f8d35bedc1d437cf45b14a416b9e5ed1610d7ecd30603f9a179406839220a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9c6bf8a15857982b68a045b8ca1d407a63ad3da192375596f26389d9983cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1712a54b482696918a3ddb7294c16ce3676cb56c2928b721be12bcd390085e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f22b718ef3d2b16061b2c0fe48c8fe612b3e2b63baa2925fce7a1ad9552b090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:39Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.139048 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g5kbw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a7cfe09-9892-494d-a420-5d720afb3df3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fb743faaf9717b716d96f7b36096ee11da25ac3eeeb601bdc6d8f20faf3a3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run
/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m86wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g5kbw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:39Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.146362 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.146398 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.146408 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.146426 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.146436 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:39Z","lastTransitionTime":"2026-01-31T16:29:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.155961 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21fa2db2-f448-487d-9ddb-ba4da28e8ffa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0131 16:29:26.255718 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0131 16:29:26.257930 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3522701725/tls.crt::/tmp/serving-cert-3522701725/tls.key\\\\\\\"\\\\nI0131 16:29:32.222719 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0131 16:29:32.227365 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0131 16:29:32.227391 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0131 16:29:32.227414 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0131 16:29:32.227419 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0131 16:29:32.234321 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0131 16:29:32.234340 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234349 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0131 16:29:32.234352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0131 16:29:32.234355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0131 16:29:32.234357 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0131 16:29:32.234523 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0131 16:29:32.237094 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:39Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.174855 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79dca88fae791047a9091e1fcad57d33d0061bbb52395ed325e2658e5fd6ffc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:39Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.193423 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:39Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.206876 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef20199c8b7874b9d34bc38aed75d3d0dc669db3dfaff3218a1a4e249330daca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:39Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.218300 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lw4fx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"97c97bcd-bd44-4fdf-a90a-2d6be88c23e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f603aa4ef9fa0a10f90946e243cd0d25dcd6dba58c86a4378085addf26d95233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6q9l7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lw4fx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:39Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.231520 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:39Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.245567 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e5e34357e35a0bda20351c442465d7866c927dfad1a6c8acf1dc32b52a326ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb37e108557965f63da49a443c6667212f8ab8eb0a7099a0a106c45e00e78e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io
/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:39Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.249176 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.249217 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.249231 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.249250 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.249263 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:39Z","lastTransitionTime":"2026-01-31T16:29:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.258535 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-slrbh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"48d46c05-78b8-4355-9027-77efbbfbe87c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c68712921d6f140f1028a42cadcda30d38e3a9772ad3ca53e6d7b3b039433b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hqcpx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-slrbh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:39Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.271013 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb8dd1c1-8ad0-4df1-9eb7-f7e36509abac\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8f8d35bedc1d437cf45b14a416b9e5ed1610d7ecd30603f9a179406839220a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9c6bf8a15857982b68a045b8ca1d407a63ad3da192375596f26389d9983cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1712a54b482696918a3ddb7294c16ce3676cb56c2928b721be12bcd390085e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f22b718ef3d2b16061b2c0fe48c8fe612b3e2b63baa2925fce7a1ad9552b090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:39Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.281319 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g5kbw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a7cfe09-9892-494d-a420-5d720afb3df3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fb743faaf9717b716d96f7b36096ee11da25ac3eeeb601bdc6d8f20faf3a3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run
/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m86wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g5kbw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:39Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.293612 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:39Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.323684 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f9e971a-93ce-4a49-a970-a2789486d12c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rftqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:39Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.351471 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.351535 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.351550 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.351568 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.351581 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:39Z","lastTransitionTime":"2026-01-31T16:29:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.359640 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d352f75-43f7-4b8c-867e-cfb17bbbe011\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e57f0adc59ebcc50644648ee64395f6834328fec384b3d8c83b9758314ea18d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48035545fd929a672be1a83a941b13f4b352bdb858af6a412efec46dc7ac217\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-4bqbm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:39Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.402604 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86f2019b-d6ca-4e73-9dac-52fe746489cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af
0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"n
ame\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIP
s\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2r9tc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:39Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.453688 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.453726 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.453735 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.453751 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.453762 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:39Z","lastTransitionTime":"2026-01-31T16:29:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.556877 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.556941 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.556959 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.556983 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.557073 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:39Z","lastTransitionTime":"2026-01-31T16:29:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.657449 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-27 07:05:41.761649284 +0000 UTC Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.659637 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.659683 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.659694 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.659712 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.659724 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:39Z","lastTransitionTime":"2026-01-31T16:29:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.762704 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.762783 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.762811 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.762842 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.762864 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:39Z","lastTransitionTime":"2026-01-31T16:29:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.865876 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.865933 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.865942 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.865965 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.865977 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:39Z","lastTransitionTime":"2026-01-31T16:29:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.920428 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" event={"ID":"8f9e971a-93ce-4a49-a970-a2789486d12c","Type":"ContainerStarted","Data":"e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a"} Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.936803 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:39Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.958896 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f9e971a-93ce-4a49-a970-a2789486d12c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly
\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rftqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:39Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.968232 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.968272 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.968282 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.968300 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.968313 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:39Z","lastTransitionTime":"2026-01-31T16:29:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.971276 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d352f75-43f7-4b8c-867e-cfb17bbbe011\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e57f0adc59ebcc50644648ee64395f6834328fec384b3d8c83b9758314ea18d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48035545fd929a672be1a83a941b13f4b352bdb858af6a412efec46dc7ac217\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-4bqbm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:39Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:39 crc kubenswrapper[4769]: I0131 16:29:39.991733 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86f2019b-d6ca-4e73-9dac-52fe746489cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af
0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"n
ame\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIP
s\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2r9tc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:39Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.008061 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21fa2db2-f448-487d-9ddb-ba4da28e8ffa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0131 16:29:26.255718 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0131 16:29:26.257930 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3522701725/tls.crt::/tmp/serving-cert-3522701725/tls.key\\\\\\\"\\\\nI0131 16:29:32.222719 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0131 16:29:32.227365 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0131 16:29:32.227391 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0131 16:29:32.227414 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0131 16:29:32.227419 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0131 16:29:32.234321 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0131 16:29:32.234340 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234349 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0131 16:29:32.234352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0131 16:29:32.234355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0131 16:29:32.234357 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0131 16:29:32.234523 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0131 16:29:32.237094 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:40Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.022410 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79dca88fae791047a9091e1fcad57d33d0061bbb52395ed325e2658e5fd6ffc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:40Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.036350 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:40Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.057485 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef20199c8b7874b9d34bc38aed75d3d0dc669db3dfaff3218a1a4e249330daca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:40Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.069690 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lw4fx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"97c97bcd-bd44-4fdf-a90a-2d6be88c23e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f603aa4ef9fa0a10f90946e243cd0d25dcd6dba58c86a4378085addf26d95233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6q9l7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lw4fx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:40Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.070702 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.070731 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.070739 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.070753 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.070764 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:40Z","lastTransitionTime":"2026-01-31T16:29:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.087541 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:40Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.099980 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e5e34357e35a0bda20351c442465d7866c927dfad1a6c8acf1dc32b52a326ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb37e108557965f63da49a443c6667212f8ab8eb0a7099a0a106c45e00e78e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:40Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.110952 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-slrbh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"48d46c05-78b8-4355-9027-77efbbfbe87c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c68712921d6f140f1028a42cadcda30d38e3a9772ad3ca53e6d7b3b039433b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hqcpx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-slrbh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:40Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.126306 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb8dd1c1-8ad0-4df1-9eb7-f7e36509abac\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8f8d35bedc1d437cf45b14a416b9e5ed1610d7ecd30603f9a179406839220a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9c6bf8a15857982b68a045b8ca1d407a63ad3da192375596f26389d9983cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1712a54b482696918a3ddb7294c16ce3676cb56c2928b721be12bcd390085e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f22b718ef3d2b16061b2c0fe48c8fe612b3e2b63baa2925fce7a1ad9552b090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:40Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.139708 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g5kbw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a7cfe09-9892-494d-a420-5d720afb3df3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fb743faaf9717b716d96f7b36096ee11da25ac3eeeb601bdc6d8f20faf3a3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run
/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m86wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g5kbw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:40Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.173012 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.173351 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.173360 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.173377 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.173389 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:40Z","lastTransitionTime":"2026-01-31T16:29:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.275413 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.275446 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.275454 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.275469 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.275478 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:40Z","lastTransitionTime":"2026-01-31T16:29:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.343294 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:29:40 crc kubenswrapper[4769]: E0131 16:29:40.343430 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:29:48.343412242 +0000 UTC m=+36.417580911 (durationBeforeRetry 8s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.343467 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.343513 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:29:40 crc kubenswrapper[4769]: E0131 16:29:40.343573 4769 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 31 16:29:40 crc kubenswrapper[4769]: E0131 16:29:40.343599 4769 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 31 16:29:40 crc kubenswrapper[4769]: E0131 16:29:40.343603 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-31 16:29:48.343596977 +0000 UTC m=+36.417765646 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 31 16:29:40 crc kubenswrapper[4769]: E0131 16:29:40.343696 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-31 16:29:48.343638238 +0000 UTC m=+36.417806907 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.378670 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.378701 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.378727 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.378742 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.378751 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:40Z","lastTransitionTime":"2026-01-31T16:29:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.444850 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.444914 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:29:40 crc kubenswrapper[4769]: E0131 16:29:40.445038 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 31 16:29:40 crc kubenswrapper[4769]: E0131 16:29:40.445056 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 31 16:29:40 crc kubenswrapper[4769]: E0131 16:29:40.445067 4769 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 31 16:29:40 crc kubenswrapper[4769]: E0131 16:29:40.445106 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. 
No retries permitted until 2026-01-31 16:29:48.445093281 +0000 UTC m=+36.519261950 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 31 16:29:40 crc kubenswrapper[4769]: E0131 16:29:40.445105 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 31 16:29:40 crc kubenswrapper[4769]: E0131 16:29:40.445141 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 31 16:29:40 crc kubenswrapper[4769]: E0131 16:29:40.445154 4769 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 31 16:29:40 crc kubenswrapper[4769]: E0131 16:29:40.445215 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-31 16:29:48.445196524 +0000 UTC m=+36.519365293 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.481123 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.481157 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.481165 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.481180 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.481190 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:40Z","lastTransitionTime":"2026-01-31T16:29:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.583375 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.583408 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.583415 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.583428 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.583437 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:40Z","lastTransitionTime":"2026-01-31T16:29:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.659253 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-30 22:50:15.958258206 +0000 UTC Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.686027 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.686051 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.686060 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.686073 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.686082 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:40Z","lastTransitionTime":"2026-01-31T16:29:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.707691 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:29:40 crc kubenswrapper[4769]: E0131 16:29:40.707777 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.707691 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.707799 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:29:40 crc kubenswrapper[4769]: E0131 16:29:40.708024 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:29:40 crc kubenswrapper[4769]: E0131 16:29:40.708216 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.788446 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.788473 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.788481 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.788518 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.788531 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:40Z","lastTransitionTime":"2026-01-31T16:29:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.890325 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.890366 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.890377 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.890393 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.890404 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:40Z","lastTransitionTime":"2026-01-31T16:29:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.926950 4769 generic.go:334] "Generic (PLEG): container finished" podID="8f9e971a-93ce-4a49-a970-a2789486d12c" containerID="e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a" exitCode=0 Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.927033 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" event={"ID":"8f9e971a-93ce-4a49-a970-a2789486d12c","Type":"ContainerDied","Data":"e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a"} Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.931701 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" event={"ID":"86f2019b-d6ca-4e73-9dac-52fe746489cb","Type":"ContainerStarted","Data":"790e576890fdac9000118d5f35a9035f93d169a103e1070a426a862fe69f0c45"} Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.932259 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.932310 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.944985 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:40Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.969360 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e5e34357e35a0bda20351c442465d7866c927dfad1a6c8acf1dc32b52a326ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb37e108557965f63da49a443c6667212f8ab8eb0a7099a0a106c45e00e78e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io
/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:40Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.980919 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-slrbh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"48d46c05-78b8-4355-9027-77efbbfbe87c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c68712921d6f140f1028a42cadcda30d38e3a9772ad3ca53e6d7b3b039433b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hqcpx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-slrbh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:40Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.994089 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.994128 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.994136 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.994154 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.994162 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:40Z","lastTransitionTime":"2026-01-31T16:29:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:40 crc kubenswrapper[4769]: I0131 16:29:40.994881 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb8dd1c1-8ad0-4df1-9eb7-f7e36509abac\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8f8d35bedc1d437cf45b14a416b9e5ed1610d7ecd30603f9a179406839220a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9c6bf8a15857982b68a045b8ca1d407a63ad3da192375596f26389d9983cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc358257
71aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1712a54b482696918a3ddb7294c16ce3676cb56c2928b721be12bcd390085e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f22b718ef3d2b16061b2c0fe48c8fe612b3e2b63baa2925fce7a1ad9552b090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:40Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.010794 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g5kbw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a7cfe09-9892-494d-a420-5d720afb3df3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fb743faaf9717b716d96f7b36096ee11da25ac3eeeb601bdc6d8f20faf3a3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m86wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g5kbw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:41Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.011935 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.012002 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.027229 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:41Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.044217 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f9e971a-93ce-4a49-a970-a2789486d12c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rftqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:41Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.062812 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d352f75-43f7-4b8c-867e-cfb17bbbe011\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e57f0adc59ebcc50644648ee64395f6834328fec384b3d8c83b9758314ea18d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48035545fd929a672be1a83a941b13f4b352bdb858af6a412efec46dc7ac217\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-4bqbm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:41Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.087273 4769 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86f2019b-d6ca-4e73-9dac-52fe746489cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",
\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17
b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2r9tc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:41Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.119897 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21fa2db2-f448-487d-9ddb-ba4da28e8ffa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0131 16:29:26.255718 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0131 16:29:26.257930 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3522701725/tls.crt::/tmp/serving-cert-3522701725/tls.key\\\\\\\"\\\\nI0131 16:29:32.222719 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0131 16:29:32.227365 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0131 16:29:32.227391 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0131 16:29:32.227414 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0131 16:29:32.227419 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0131 16:29:32.234321 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0131 16:29:32.234340 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234349 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0131 16:29:32.234352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0131 16:29:32.234355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0131 16:29:32.234357 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0131 16:29:32.234523 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0131 16:29:32.237094 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:41Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.121914 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.121949 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.121958 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.121973 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.121984 4769 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:41Z","lastTransitionTime":"2026-01-31T16:29:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.158195 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79dca88fae791047a9091e1fcad57d33d0061bbb52395ed325e2658e5fd6ffc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:41Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.169677 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:41Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.179830 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef20199c8b7874b9d34bc38aed75d3d0dc669db3dfaff3218a1a4e249330daca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:41Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.189609 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lw4fx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"97c97bcd-bd44-4fdf-a90a-2d6be88c23e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f603aa4ef9fa0a10f90946e243cd0d25dcd6dba58c86a4378085addf26d95233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6q9l7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lw4fx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:41Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.200745 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:41Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.211718 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e5e34357e35a0bda20351c442465d7866c927dfad1a6c8acf1dc32b52a326ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb37e108557965f63da49a443c6667212f8ab8eb0a7099a0a106c45e00e78e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io
/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:41Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.222112 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-slrbh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"48d46c05-78b8-4355-9027-77efbbfbe87c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c68712921d6f140f1028a42cadcda30d38e3a9772ad3ca53e6d7b3b039433b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hqcpx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-slrbh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:41Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.224154 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.224201 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.224216 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.224237 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.224251 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:41Z","lastTransitionTime":"2026-01-31T16:29:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.236562 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb8dd1c1-8ad0-4df1-9eb7-f7e36509abac\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8f8d35bedc1d437cf45b14a416b9e5ed1610d7ecd30603f9a179406839220a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9c6bf8a15857982b68a045b8ca1d407a63ad3da192375596f26389d9983cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc358257
71aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1712a54b482696918a3ddb7294c16ce3676cb56c2928b721be12bcd390085e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f22b718ef3d2b16061b2c0fe48c8fe612b3e2b63baa2925fce7a1ad9552b090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:41Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.252283 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g5kbw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a7cfe09-9892-494d-a420-5d720afb3df3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fb743faaf9717b716d96f7b36096ee11da25ac3eeeb601bdc6d8f20faf3a3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m86wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g5kbw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:41Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.274628 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:41Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.295577 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f9e971a-93ce-4a49-a970-a2789486d12c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mo
untPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rftqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:41Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.310012 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d352f75-43f7-4b8c-867e-cfb17bbbe011\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e57f0adc59ebcc50644648ee64395f6834328fec384b3d8c83b9758314ea18d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48035545fd929a672be1a83a941b13f4b352bdb858af6a412efec46dc7ac217\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-4bqbm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:41Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.326932 4769 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.326979 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.326989 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.327005 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.327017 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:41Z","lastTransitionTime":"2026-01-31T16:29:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.331254 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86f2019b-d6ca-4e73-9dac-52fe746489cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://790e576890fdac9000118d5f35a9035f93d169a1
03e1070a426a862fe69f0c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2r9tc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:41Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.343126 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lw4fx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"97c97bcd-bd44-4fdf-a90a-2d6be88c23e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f603aa4ef9fa0a10f90946e243cd0d25dcd6dba58c86a4378085addf26d95233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6q9l7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lw4fx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:41Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.358187 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21fa2db2-f448-487d-9ddb-ba4da28e8ffa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0131 16:29:26.255718 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0131 16:29:26.257930 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3522701725/tls.crt::/tmp/serving-cert-3522701725/tls.key\\\\\\\"\\\\nI0131 16:29:32.222719 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0131 16:29:32.227365 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0131 16:29:32.227391 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0131 16:29:32.227414 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0131 16:29:32.227419 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0131 16:29:32.234321 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0131 16:29:32.234340 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234349 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0131 16:29:32.234352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0131 16:29:32.234355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0131 16:29:32.234357 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0131 16:29:32.234523 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0131 16:29:32.237094 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:41Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.374921 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79dca88fae791047a9091e1fcad57d33d0061bbb52395ed325e2658e5fd6ffc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:41Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.388260 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:41Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.405430 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef20199c8b7874b9d34bc38aed75d3d0dc669db3dfaff3218a1a4e249330daca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:41Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.429814 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.429862 4769 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.429876 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.429899 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.429915 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:41Z","lastTransitionTime":"2026-01-31T16:29:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.533116 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.533176 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.533190 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.533210 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.533222 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:41Z","lastTransitionTime":"2026-01-31T16:29:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.636203 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.636253 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.636266 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.636285 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.636297 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:41Z","lastTransitionTime":"2026-01-31T16:29:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.659661 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-18 04:14:51.447318668 +0000 UTC Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.738998 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.739109 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.739129 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.739165 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.739193 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:41Z","lastTransitionTime":"2026-01-31T16:29:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.841121 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.841154 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.841163 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.841176 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.841186 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:41Z","lastTransitionTime":"2026-01-31T16:29:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.939559 4769 generic.go:334] "Generic (PLEG): container finished" podID="8f9e971a-93ce-4a49-a970-a2789486d12c" containerID="b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef" exitCode=0 Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.939692 4769 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.940055 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" event={"ID":"8f9e971a-93ce-4a49-a970-a2789486d12c","Type":"ContainerDied","Data":"b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef"} Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.942859 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.942892 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.942901 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.942916 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.942930 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:41Z","lastTransitionTime":"2026-01-31T16:29:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.960541 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb8dd1c1-8ad0-4df1-9eb7-f7e36509abac\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8f8d35bedc1d437cf45b14a416b9e5ed1610d7ecd30603f9a179406839220a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9c6bf8a15857982b68a045b8ca1d407a63ad3da192375596f26389d9983cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1712a54b482696918a3ddb7294c16ce3676cb56c2928b721be12bcd390085e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f22b718ef3d2b16061b2c0fe48c8fe612b3e2b63baa2925fce7a1ad9552b090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:41Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.981540 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g5kbw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a7cfe09-9892-494d-a420-5d720afb3df3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fb743faaf9717b716d96f7b36096ee11da25ac3eeeb601bdc6d8f20faf3a3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m86wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g5kbw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:41Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:41 crc kubenswrapper[4769]: I0131 16:29:41.999282 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:41Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.027284 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f9e971a-93ce-4a49-a970-a2789486d12c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rftqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:42Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.045445 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.045482 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:42 crc 
kubenswrapper[4769]: I0131 16:29:42.045508 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.045524 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.045533 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:42Z","lastTransitionTime":"2026-01-31T16:29:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.049630 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d352f75-43f7-4b8c-867e-cfb17bbbe011\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e57f0adc59ebcc50644648ee64395f6834328fec384b3d8c83b9758314ea18d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48035545fd929a672be1a83a941b13f4b352bdb858af6a412efec46dc7ac217\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\
\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-4bqbm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:42Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.076300 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86f2019b-d6ca-4e73-9dac-52fe746489cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://790e576890fdac9000118d5f35a9035f93d169a1
03e1070a426a862fe69f0c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2r9tc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:42Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.094039 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21fa2db2-f448-487d-9ddb-ba4da28e8ffa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0131 16:29:26.255718 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0131 16:29:26.257930 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3522701725/tls.crt::/tmp/serving-cert-3522701725/tls.key\\\\\\\"\\\\nI0131 16:29:32.222719 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0131 16:29:32.227365 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0131 16:29:32.227391 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0131 16:29:32.227414 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0131 16:29:32.227419 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0131 16:29:32.234321 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0131 16:29:32.234340 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234349 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0131 16:29:32.234352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0131 16:29:32.234355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0131 16:29:32.234357 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0131 16:29:32.234523 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0131 16:29:32.237094 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:42Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.112284 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79dca88fae791047a9091e1fcad57d33d0061bbb52395ed325e2658e5fd6ffc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:42Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.127600 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:42Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.145507 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef20199c8b7874b9d34bc38aed75d3d0dc669db3dfaff3218a1a4e249330daca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:42Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.147919 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.147948 4769 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.147957 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.147971 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.147981 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:42Z","lastTransitionTime":"2026-01-31T16:29:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.157043 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lw4fx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"97c97bcd-bd44-4fdf-a90a-2d6be88c23e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f603aa4ef9fa0a10f90946e243cd0d25dcd6dba58c86a4378085addf26d95233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6q9l7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lw4fx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not 
yet valid: current time 2026-01-31T16:29:42Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.172813 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:42Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.190989 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e5e34357e35a0bda20351c442465d7866c927dfad1a6c8acf1dc32b52a326ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb37e108557965f63da49a443c6667212f8ab8eb0a7099a0a106c45e00e78e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:42Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.204695 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-slrbh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"48d46c05-78b8-4355-9027-77efbbfbe87c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c68712921d6f140f1028a42cadcda30d38e3a9772ad3ca53e6d7b3b039433b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hqcpx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-slrbh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:42Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.250636 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.250679 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.250694 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.250727 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.250746 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:42Z","lastTransitionTime":"2026-01-31T16:29:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.353135 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.353193 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.353210 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.353233 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.353249 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:42Z","lastTransitionTime":"2026-01-31T16:29:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.455121 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.455170 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.455184 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.455203 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.455217 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:42Z","lastTransitionTime":"2026-01-31T16:29:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.557409 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.557437 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.557445 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.557458 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.557468 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:42Z","lastTransitionTime":"2026-01-31T16:29:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.659623 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.659652 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.659660 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.659674 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.659683 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:42Z","lastTransitionTime":"2026-01-31T16:29:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.660039 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-23 12:13:13.190967407 +0000 UTC Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.709885 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:29:42 crc kubenswrapper[4769]: E0131 16:29:42.709981 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.710209 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:29:42 crc kubenswrapper[4769]: E0131 16:29:42.710260 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.710301 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:29:42 crc kubenswrapper[4769]: E0131 16:29:42.710338 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.721201 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:42Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.733090 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e5e34357e35a0bda20351c442465d7866c927dfad1a6c8acf1dc32b52a326ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb37e108557965f63da49a443c6667212f8ab8eb0a7099a0a106c45e00e78e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:42Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.744469 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-slrbh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"48d46c05-78b8-4355-9027-77efbbfbe87c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c68712921d6f140f1028a42cadcda30d38e3a9772ad3ca53e6d7b3b039433b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hqcpx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-slrbh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:42Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.755551 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb8dd1c1-8ad0-4df1-9eb7-f7e36509abac\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8f8d35bedc1d437cf45b14a416b9e5ed1610d7ecd30603f9a179406839220a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9c6bf8a15857982b68a045b8ca1d407a63ad3da192375596f26389d9983cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1712a54b482696918a3ddb7294c16ce3676cb56c2928b721be12bcd390085e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f22b718ef3d2b16061b2c0fe48c8fe612b3e2b63baa2925fce7a1ad9552b090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:42Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.761726 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.761758 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.761769 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.761786 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.761797 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:42Z","lastTransitionTime":"2026-01-31T16:29:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.769269 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g5kbw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a7cfe09-9892-494d-a420-5d720afb3df3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fb743faaf9717b716d96f7b36096ee11da25ac3eeeb601bdc6d8f20faf3a3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m86wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g5kbw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:42Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.781776 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:42Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.796020 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f9e971a-93ce-4a49-a970-a2789486d12c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rftqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:42Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.807319 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d352f75-43f7-4b8c-867e-cfb17bbbe011\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e57f0adc59ebcc50644648ee64395f6834328fec384b3d8c83b9758314ea18d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48035545fd929a672be1a83a941b13f4b352bdb858af6a412efec46dc7ac217\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-4bqbm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:42Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.833090 4769 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86f2019b-d6ca-4e73-9dac-52fe746489cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://790e576890fdac9000118d5f35a9035f93d169a103e1070a426a862fe69f0c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount
\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2r9tc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:42Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.852268 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"21fa2db2-f448-487d-9ddb-ba4da28e8ffa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0131 16:29:26.255718 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0131 16:29:26.257930 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3522701725/tls.crt::/tmp/serving-cert-3522701725/tls.key\\\\\\\"\\\\nI0131 16:29:32.222719 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0131 16:29:32.227365 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0131 16:29:32.227391 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0131 16:29:32.227414 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0131 16:29:32.227419 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0131 16:29:32.234321 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0131 16:29:32.234340 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234349 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0131 16:29:32.234352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0131 16:29:32.234355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0131 16:29:32.234357 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0131 16:29:32.234523 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0131 16:29:32.237094 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:42Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.864202 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.864295 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.864319 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.864354 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.864379 4769 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:42Z","lastTransitionTime":"2026-01-31T16:29:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.871715 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79dca88fae791047a9091e1fcad57d33d0061bbb52395ed325e2658e5fd6ffc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:42Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.889330 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:42Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.907754 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef20199c8b7874b9d34bc38aed75d3d0dc669db3dfaff3218a1a4e249330daca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:42Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.918009 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lw4fx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"97c97bcd-bd44-4fdf-a90a-2d6be88c23e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f603aa4ef9fa0a10f90946e243cd0d25dcd6dba58c86a4378085addf26d95233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6q9l7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lw4fx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:42Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.946726 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" event={"ID":"8f9e971a-93ce-4a49-a970-a2789486d12c","Type":"ContainerStarted","Data":"f591a190eaf4b09cdb5961545cba8b3e469d2dd5b6b729627af238d6d8ebb410"} Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.946781 4769 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.958348 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lw4fx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"97c97bcd-bd44-4fdf-a90a-2d6be88c23e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f603aa4ef9fa0a10f90946e243cd0d25dcd6dba58c86a4378085addf26d95233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6q9l7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lw4fx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:42Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.966649 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.966678 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.966687 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.966701 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.966711 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:42Z","lastTransitionTime":"2026-01-31T16:29:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.974794 4769 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.979331 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21fa2db2-f448-487d-9ddb-ba4da28e8ffa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27
753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0131 16:29:26.255718 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0131 16:29:26.257930 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3522701725/tls.crt::/tmp/serving-cert-3522701725/tls.key\\\\\\\"\\\\nI0131 16:29:32.222719 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0131 16:29:32.227365 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0131 16:29:32.227391 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0131 16:29:32.227414 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0131 16:29:32.227419 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0131 16:29:32.234321 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0131 16:29:32.234340 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234349 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0131 16:29:32.234352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0131 16:29:32.234355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0131 16:29:32.234357 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0131 16:29:32.234523 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0131 16:29:32.237094 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:42Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:42 crc kubenswrapper[4769]: I0131 16:29:42.995325 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79dca88fae791047a9091e1fcad57d33d0061bbb52395ed325e2658e5fd6ffc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:42Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.009406 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:43Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.022205 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef20199c8b7874b9d34bc38aed75d3d0dc669db3dfaff3218a1a4e249330daca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:43Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.036565 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:43Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.050941 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e5e34357e35a0bda20351c442465d7866c927dfad1a6c8acf1dc32b52a326ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb37e108557965f63da49a443c6667212f8ab8eb0a7099a0a106c45e00e78e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:43Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.064330 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-slrbh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"48d46c05-78b8-4355-9027-77efbbfbe87c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c68712921d6f140f1028a42cadcda30d38e3a9772ad3ca53e6d7b3b039433b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hqcpx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-slrbh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:43Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.068435 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.068462 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.068470 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.068484 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.068510 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:43Z","lastTransitionTime":"2026-01-31T16:29:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.079281 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb8dd1c1-8ad0-4df1-9eb7-f7e36509abac\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8f8d35bedc1d437cf45b14a416b9e5ed1610d7ecd30603f9a179406839220a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9c6bf8a15857982b68a045b8ca1d407a63ad3da192375596f26389d9983cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1712a54b482696918a3ddb7294c16ce3676cb56c2928b721be12bcd390085e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resour
ces\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f22b718ef3d2b16061b2c0fe48c8fe612b3e2b63baa2925fce7a1ad9552b090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:43Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.094243 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g5kbw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a7cfe09-9892-494d-a420-5d720afb3df3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fb743faaf9717b716d96f7b36096ee11da25ac3eeeb601bdc6d8f20faf3a3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m86wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g5kbw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:43Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.109141 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:43Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.124332 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f9e971a-93ce-4a49-a970-a2789486d12c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f591a190eaf4b09cdb5961545cba8b3e469d2dd5b6b729627af238d6d8ebb410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rftqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:43Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.137448 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d352f75-43f7-4b8c-867e-cfb17bbbe011\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e57f0adc59ebcc50644648ee64395f6834328fec384b3d8c83b9758314ea18d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48035545fd929a672be1a83a941b13f4b352bdb858af6a412efec46dc7ac217\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-4bqbm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:43Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.153778 4769 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86f2019b-d6ca-4e73-9dac-52fe746489cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://790e576890fdac9000118d5f35a9035f93d169a103e1070a426a862fe69f0c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount
\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2r9tc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:43Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.170701 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.170747 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.170761 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:43 crc 
kubenswrapper[4769]: I0131 16:29:43.170779 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.170793 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:43Z","lastTransitionTime":"2026-01-31T16:29:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.273778 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.274177 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.274190 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.274210 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.274222 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:43Z","lastTransitionTime":"2026-01-31T16:29:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.376374 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.376415 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.376426 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.376443 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.376456 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:43Z","lastTransitionTime":"2026-01-31T16:29:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.479317 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.479380 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.479397 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.479425 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.479442 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:43Z","lastTransitionTime":"2026-01-31T16:29:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.582674 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.582738 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.582754 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.582780 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.582797 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:43Z","lastTransitionTime":"2026-01-31T16:29:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.660564 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-19 18:03:42.236557095 +0000 UTC Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.685969 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.686028 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.686040 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.686060 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.686377 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:43Z","lastTransitionTime":"2026-01-31T16:29:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.789890 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.789941 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.789956 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.789978 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.789993 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:43Z","lastTransitionTime":"2026-01-31T16:29:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.892645 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.892710 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.892728 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.892755 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.892774 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:43Z","lastTransitionTime":"2026-01-31T16:29:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.956764 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2r9tc_86f2019b-d6ca-4e73-9dac-52fe746489cb/ovnkube-controller/0.log" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.961226 4769 generic.go:334] "Generic (PLEG): container finished" podID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerID="790e576890fdac9000118d5f35a9035f93d169a103e1070a426a862fe69f0c45" exitCode=1 Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.961288 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" event={"ID":"86f2019b-d6ca-4e73-9dac-52fe746489cb","Type":"ContainerDied","Data":"790e576890fdac9000118d5f35a9035f93d169a103e1070a426a862fe69f0c45"} Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.962393 4769 scope.go:117] "RemoveContainer" containerID="790e576890fdac9000118d5f35a9035f93d169a103e1070a426a862fe69f0c45" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.981917 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f9e971a-93ce-4a49-a970-a2789486d12c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f591a190eaf4b09cdb5961545cba8b3e469d2dd5b6b729627af238d6d8ebb410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rftqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:43Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.997449 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.997626 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:43 crc 
kubenswrapper[4769]: I0131 16:29:43.997644 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.997670 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:43 crc kubenswrapper[4769]: I0131 16:29:43.997689 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:43Z","lastTransitionTime":"2026-01-31T16:29:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.000083 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d352f75-43f7-4b8c-867e-cfb17bbbe011\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e57f0adc59ebcc50644648ee64395f6834328fec384b3d8c83b9758314ea18d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48035545fd929a672be1a83a941b13f4b352bdb858af6a412efec46dc7ac217\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\
\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-4bqbm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:43Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.030336 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86f2019b-d6ca-4e73-9dac-52fe746489cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://790e576890fdac9000118d5f35a9035f93d169a1
03e1070a426a862fe69f0c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://790e576890fdac9000118d5f35a9035f93d169a103e1070a426a862fe69f0c45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-31T16:29:43Z\\\",\\\"message\\\":\\\"v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0131 16:29:43.190451 6061 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0131 16:29:43.190564 6061 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0131 16:29:43.190546 6061 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0131 16:29:43.190751 6061 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0131 16:29:43.190814 6061 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0131 16:29:43.190934 6061 factory.go:656] Stopping watch factory\\\\nI0131 16:29:43.190991 6061 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0131 16:29:43.191080 6061 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0131 16:29:43.191306 6061 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0131 16:29:43.191358 6061 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0131 16:29:43.190744 6061 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0131 16:29:43.191436 6061 handler.go:208] Removed *v1.NetworkPolicy 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2r9tc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:44Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.045794 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:44Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.063937 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79dca88fae791047a9091e1fcad57d33d0061bbb52395ed325e2658e5fd6ffc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:44Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.080000 4769 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:44Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.094204 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef20199c8b7874b9d34bc38aed75d3d0dc669db3dfaff3218a1a4e249330daca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:44Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.100070 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.100118 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.100134 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.100155 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.100169 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:44Z","lastTransitionTime":"2026-01-31T16:29:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.109229 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lw4fx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"97c97bcd-bd44-4fdf-a90a-2d6be88c23e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f603aa4ef9fa0a10f90946e243cd0d25dcd6dba58c86a4378085addf26d95233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6q9l7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lw4fx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:44Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.126968 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21fa2db2-f448-487d-9ddb-ba4da28e8ffa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0131 16:29:26.255718 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0131 16:29:26.257930 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3522701725/tls.crt::/tmp/serving-cert-3522701725/tls.key\\\\\\\"\\\\nI0131 16:29:32.222719 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0131 16:29:32.227365 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0131 16:29:32.227391 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0131 16:29:32.227414 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0131 16:29:32.227419 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0131 16:29:32.234321 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0131 16:29:32.234340 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234349 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0131 16:29:32.234352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0131 16:29:32.234355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0131 16:29:32.234357 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0131 16:29:32.234523 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0131 16:29:32.237094 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:44Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.141305 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:44Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.160260 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e5e34357e35a0bda20351c442465d7866c927dfad1a6c8acf1dc32b52a326ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb37e108557965f63da49a443c6667212f8ab8eb0a7099a0a106c45e00e78e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:44Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.175809 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-slrbh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"48d46c05-78b8-4355-9027-77efbbfbe87c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c68712921d6f140f1028a42cadcda30d38e3a9772ad3ca53e6d7b3b039433b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hqcpx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-slrbh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:44Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.192040 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb8dd1c1-8ad0-4df1-9eb7-f7e36509abac\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8f8d35bedc1d437cf45b14a416b9e5ed1610d7ecd30603f9a179406839220a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9c6bf8a15857982b68a045b8ca1d407a63ad3da192375596f26389d9983cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1712a54b482696918a3ddb7294c16ce3676cb56c2928b721be12bcd390085e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f22b718ef3d2b16061b2c0fe48c8fe612b3e2b63baa2925fce7a1ad9552b090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:44Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.204422 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.204461 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.204472 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.204490 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.204506 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:44Z","lastTransitionTime":"2026-01-31T16:29:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.204935 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g5kbw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a7cfe09-9892-494d-a420-5d720afb3df3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fb743faaf9717b716d96f7b36096ee11da25ac3eeeb601bdc6d8f20faf3a3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m86wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g5kbw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:44Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.307556 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.307617 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.307635 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.307662 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.307682 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:44Z","lastTransitionTime":"2026-01-31T16:29:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.410584 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.410717 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.410769 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.410799 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.410823 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:44Z","lastTransitionTime":"2026-01-31T16:29:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.411490 4769 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.513161 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.513215 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.513232 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.513257 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.513275 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:44Z","lastTransitionTime":"2026-01-31T16:29:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.616792 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.616865 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.616889 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.616921 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.616947 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:44Z","lastTransitionTime":"2026-01-31T16:29:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.661433 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-30 13:08:59.755867298 +0000 UTC Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.707944 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.707963 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.708013 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:29:44 crc kubenswrapper[4769]: E0131 16:29:44.708082 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:29:44 crc kubenswrapper[4769]: E0131 16:29:44.708237 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:29:44 crc kubenswrapper[4769]: E0131 16:29:44.708405 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.719100 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.719149 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.719162 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.719184 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.719196 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:44Z","lastTransitionTime":"2026-01-31T16:29:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.820919 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.820982 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.820993 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.821013 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.821025 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:44Z","lastTransitionTime":"2026-01-31T16:29:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.923890 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.923926 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.923941 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.923959 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.923973 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:44Z","lastTransitionTime":"2026-01-31T16:29:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.966104 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2r9tc_86f2019b-d6ca-4e73-9dac-52fe746489cb/ovnkube-controller/0.log" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.969369 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" event={"ID":"86f2019b-d6ca-4e73-9dac-52fe746489cb","Type":"ContainerStarted","Data":"3e7b26b263ec2549303e59d543bdf302fea206a2ef416b96a1a56218edc92774"} Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.969618 4769 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 31 16:29:44 crc kubenswrapper[4769]: I0131 16:29:44.983531 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d352f75-43f7-4b8c-867e-cfb17bbbe011\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e57f0adc59ebcc50644648ee64395f6834328fec384b3d8c83b9758314ea18d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48035545fd929a672be1a83a941b13f4b352bdb858af6a412efec46dc7ac217\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs
\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-4bqbm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:44Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.006889 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86f2019b-d6ca-4e73-9dac-52fe746489cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e7b26b263ec2549303e59d543bdf302fea206a2
ef416b96a1a56218edc92774\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://790e576890fdac9000118d5f35a9035f93d169a103e1070a426a862fe69f0c45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-31T16:29:43Z\\\",\\\"message\\\":\\\"v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0131 16:29:43.190451 6061 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0131 16:29:43.190564 6061 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0131 16:29:43.190546 6061 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0131 16:29:43.190751 6061 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0131 16:29:43.190814 6061 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0131 16:29:43.190934 6061 factory.go:656] Stopping watch factory\\\\nI0131 16:29:43.190991 6061 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0131 16:29:43.191080 6061 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0131 16:29:43.191306 6061 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0131 16:29:43.191358 6061 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0131 16:29:43.190744 6061 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0131 16:29:43.191436 6061 handler.go:208] Removed *v1.NetworkPolicy 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:40Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2r9tc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:45Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.018402 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:45Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.026676 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.026721 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.026732 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.026747 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.026757 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:45Z","lastTransitionTime":"2026-01-31T16:29:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.032350 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f9e971a-93ce-4a49-a970-a2789486d12c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f591a190eaf4b09cdb5961545cba8b3e469d2dd5b6b729627af238d6d8ebb410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rftqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:45Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.044996 4769 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79dca88fae791047a9091e1fcad57d33d0061bbb52395ed325e2658e5fd6ffc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:45Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.055113 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:45Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.068705 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef20199c8b7874b9d34bc38aed75d3d0dc669db3dfaff3218a1a4e249330daca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:45Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.087102 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lw4fx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"97c97bcd-bd44-4fdf-a90a-2d6be88c23e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f603aa4ef9fa0a10f90946e243cd0d25dcd6dba58c86a4378085addf26d95233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6q9l7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lw4fx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:45Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.106084 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"21fa2db2-f448-487d-9ddb-ba4da28e8ffa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0131 16:29:26.255718 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0131 16:29:26.257930 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3522701725/tls.crt::/tmp/serving-cert-3522701725/tls.key\\\\\\\"\\\\nI0131 16:29:32.222719 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0131 16:29:32.227365 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0131 16:29:32.227391 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0131 16:29:32.227414 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0131 16:29:32.227419 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0131 16:29:32.234321 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0131 16:29:32.234340 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234349 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0131 16:29:32.234352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0131 16:29:32.234355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0131 16:29:32.234357 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0131 16:29:32.234523 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0131 16:29:32.237094 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:45Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.119500 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e5e34357e35a0bda20351c442465d7866c927dfad1a6c8acf1dc32b52a326ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb37e108557965f63da49a443c6667212f8ab8eb0a7099a0a106c45e00e78e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:45Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.129626 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.129667 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.129682 4769 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.129699 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.129711 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:45Z","lastTransitionTime":"2026-01-31T16:29:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.130514 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-slrbh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"48d46c05-78b8-4355-9027-77efbbfbe87c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c68712921d6f140f1028a42cadcda30d38e3a9772ad3ca53e6d7b3b039433b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hqcpx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-slrbh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:45Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.143749 4769 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:45Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.156928 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g5kbw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a7cfe09-9892-494d-a420-5d720afb3df3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fb743faaf9717b716d96f7b36096ee11da25ac3eeeb601bdc6d8f20faf3a3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m86wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g5kbw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:45Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.166931 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb8dd1c1-8ad0-4df1-9eb7-f7e36509abac\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8f8d35bedc1d437cf45b14a416b9e5ed1610d7ecd30603f9a179406839220a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9c6bf8a15857982b68a045b8ca1d407a63ad3da192375596f26389d9983cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1712a54b482696918a3ddb7294c16ce3676cb56c2928b721be12bcd390085e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f22b718ef3d2b16061b2c0fe48c8fe612b3e2b63baa2925fce7a1ad9552b090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:45Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.231912 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.231953 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.231962 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.231976 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.231986 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:45Z","lastTransitionTime":"2026-01-31T16:29:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.334819 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.334952 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.334980 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.335033 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.335064 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:45Z","lastTransitionTime":"2026-01-31T16:29:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.437849 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.437896 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.437908 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.437924 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.437936 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:45Z","lastTransitionTime":"2026-01-31T16:29:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.540572 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.540633 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.540647 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.540671 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.540686 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:45Z","lastTransitionTime":"2026-01-31T16:29:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.644386 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.644446 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.644463 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.644492 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.644546 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:45Z","lastTransitionTime":"2026-01-31T16:29:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.662343 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-29 19:40:09.114504361 +0000 UTC Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.748331 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.748400 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.748418 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.748458 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.748504 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:45Z","lastTransitionTime":"2026-01-31T16:29:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.852162 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.852250 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.852279 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.852321 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.852354 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:45Z","lastTransitionTime":"2026-01-31T16:29:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.956449 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.956550 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.956568 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.956595 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.956612 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:45Z","lastTransitionTime":"2026-01-31T16:29:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.975097 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2r9tc_86f2019b-d6ca-4e73-9dac-52fe746489cb/ovnkube-controller/1.log" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.976078 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2r9tc_86f2019b-d6ca-4e73-9dac-52fe746489cb/ovnkube-controller/0.log" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.979393 4769 generic.go:334] "Generic (PLEG): container finished" podID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerID="3e7b26b263ec2549303e59d543bdf302fea206a2ef416b96a1a56218edc92774" exitCode=1 Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.979455 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" event={"ID":"86f2019b-d6ca-4e73-9dac-52fe746489cb","Type":"ContainerDied","Data":"3e7b26b263ec2549303e59d543bdf302fea206a2ef416b96a1a56218edc92774"} Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.979575 4769 scope.go:117] "RemoveContainer" containerID="790e576890fdac9000118d5f35a9035f93d169a103e1070a426a862fe69f0c45" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.981400 4769 scope.go:117] "RemoveContainer" containerID="3e7b26b263ec2549303e59d543bdf302fea206a2ef416b96a1a56218edc92774" Jan 31 16:29:45 crc kubenswrapper[4769]: E0131 16:29:45.981662 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-2r9tc_openshift-ovn-kubernetes(86f2019b-d6ca-4e73-9dac-52fe746489cb)\"" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" Jan 31 16:29:45 crc kubenswrapper[4769]: I0131 16:29:45.993046 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:45Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.012122 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f9e971a-93ce-4a49-a970-a2789486d12c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f591a190eaf4b09cdb5961545cba8b3e469d2dd5b6b729627af238d6d8ebb410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rftqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:46Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.025119 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d352f75-43f7-4b8c-867e-cfb17bbbe011\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e57f0adc59ebcc50644648ee64395f6834328fec384b3d8c83b9758314ea18d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48035545fd929a672be1a83a941b13f4b352bdb858af6a412efec46dc7ac217\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-4bqbm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:46Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.042937 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86f2019b-d6ca-4e73-9dac-52fe746489cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e7b26b263ec2549303e59d543bdf302fea206a2
ef416b96a1a56218edc92774\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://790e576890fdac9000118d5f35a9035f93d169a103e1070a426a862fe69f0c45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-31T16:29:43Z\\\",\\\"message\\\":\\\"v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0131 16:29:43.190451 6061 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0131 16:29:43.190564 6061 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0131 16:29:43.190546 6061 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0131 16:29:43.190751 6061 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0131 16:29:43.190814 6061 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0131 16:29:43.190934 6061 factory.go:656] Stopping watch factory\\\\nI0131 16:29:43.190991 6061 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0131 16:29:43.191080 6061 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0131 16:29:43.191306 6061 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0131 16:29:43.191358 6061 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0131 16:29:43.190744 6061 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0131 16:29:43.191436 6061 handler.go:208] Removed *v1.NetworkPolicy ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:40Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e7b26b263ec2549303e59d543bdf302fea206a2ef416b96a1a56218edc92774\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-31T16:29:45Z\\\",\\\"message\\\":\\\"ift-operator-lifecycle-manager/catalog-operator-metrics template LB for network=default: []services.LB{}\\\\nI0131 16:29:45.104699 6213 services_controller.go:454] Service openshift-operator-lifecycle-manager/catalog-operator-metrics for network=default has 1 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nI0131 16:29:45.104688 6213 services_controller.go:451] Built service openshift-marketplace/redhat-operators cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-marketplace/redhat-operators_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-marketplace/redhat-operators\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.138\\\\\\\", Port:50051, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), 
Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF0131 16:29:45.104744 6213 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: failed to add event handler: handler {0x1e60340 0x1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.16
8.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2r9tc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:46Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.049090 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kvc58"] Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.049628 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kvc58" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.051685 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.052071 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.060411 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.060461 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.060473 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.060497 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.060515 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:46Z","lastTransitionTime":"2026-01-31T16:29:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.066735 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21fa2db2-f448-487d-9ddb-ba4da28e8ffa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0131 16:29:26.255718 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0131 16:29:26.257930 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3522701725/tls.crt::/tmp/serving-cert-3522701725/tls.key\\\\\\\"\\\\nI0131 16:29:32.222719 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0131 16:29:32.227365 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0131 16:29:32.227391 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0131 16:29:32.227414 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0131 16:29:32.227419 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0131 16:29:32.234321 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0131 16:29:32.234340 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234349 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0131 16:29:32.234352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0131 16:29:32.234355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0131 16:29:32.234357 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0131 16:29:32.234523 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0131 16:29:32.237094 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:46Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.082677 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79dca88fae791047a9091e1fcad57d33d0061bbb52395ed325e2658e5fd6ffc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:46Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.104689 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:46Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.106227 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/e39e875a-bea7-4e27-af9a-f769a493efe7-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-kvc58\" (UID: \"e39e875a-bea7-4e27-af9a-f769a493efe7\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kvc58" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.106309 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/e39e875a-bea7-4e27-af9a-f769a493efe7-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-kvc58\" (UID: \"e39e875a-bea7-4e27-af9a-f769a493efe7\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kvc58" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.106366 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6shzp\" (UniqueName: \"kubernetes.io/projected/e39e875a-bea7-4e27-af9a-f769a493efe7-kube-api-access-6shzp\") pod \"ovnkube-control-plane-749d76644c-kvc58\" (UID: \"e39e875a-bea7-4e27-af9a-f769a493efe7\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kvc58" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.106592 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/e39e875a-bea7-4e27-af9a-f769a493efe7-env-overrides\") pod \"ovnkube-control-plane-749d76644c-kvc58\" (UID: \"e39e875a-bea7-4e27-af9a-f769a493efe7\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kvc58" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.119134 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef20199c8b7874b9d34bc38aed75d3d0dc669db3dfaff3218a1a4e249330daca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:46Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.134784 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lw4fx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"97c97bcd-bd44-4fdf-a90a-2d6be88c23e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f603aa4ef9fa0a10f90946e243cd0d25dcd6dba58c86a4378085addf26d95233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6q9l7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lw4fx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:46Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.156621 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:46Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.163801 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.163858 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.163876 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.163902 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.163918 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:46Z","lastTransitionTime":"2026-01-31T16:29:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.176163 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e5e34357e35a0bda20351c442465d7866c927dfad1a6c8acf1dc32b52a326ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb37e108557965f63da49a443c6667212f8ab8eb0a7099a0a106c45e00e78e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:46Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.193165 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-slrbh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"48d46c05-78b8-4355-9027-77efbbfbe87c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c68712921d6f140f1028a42cadcda30d38e3a9772ad3ca53e6d7b3b039433b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hqcpx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-slrbh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:46Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.207815 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/e39e875a-bea7-4e27-af9a-f769a493efe7-env-overrides\") pod \"ovnkube-control-plane-749d76644c-kvc58\" (UID: \"e39e875a-bea7-4e27-af9a-f769a493efe7\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kvc58" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.207945 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/e39e875a-bea7-4e27-af9a-f769a493efe7-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-kvc58\" (UID: \"e39e875a-bea7-4e27-af9a-f769a493efe7\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kvc58" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.207983 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: 
\"kubernetes.io/secret/e39e875a-bea7-4e27-af9a-f769a493efe7-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-kvc58\" (UID: \"e39e875a-bea7-4e27-af9a-f769a493efe7\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kvc58" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.208019 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6shzp\" (UniqueName: \"kubernetes.io/projected/e39e875a-bea7-4e27-af9a-f769a493efe7-kube-api-access-6shzp\") pod \"ovnkube-control-plane-749d76644c-kvc58\" (UID: \"e39e875a-bea7-4e27-af9a-f769a493efe7\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kvc58" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.208949 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/e39e875a-bea7-4e27-af9a-f769a493efe7-env-overrides\") pod \"ovnkube-control-plane-749d76644c-kvc58\" (UID: \"e39e875a-bea7-4e27-af9a-f769a493efe7\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kvc58" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.209268 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/e39e875a-bea7-4e27-af9a-f769a493efe7-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-kvc58\" (UID: \"e39e875a-bea7-4e27-af9a-f769a493efe7\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kvc58" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.212119 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb8dd1c1-8ad0-4df1-9eb7-f7e36509abac\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8f8d35bedc1d437cf45b14a416b9e5ed1610d7ecd30603f9a179406839220a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9c6bf8a15857982b68a045b8ca1d407a63ad3da192375596f26389d9983cc\\\",\\\"i
mage\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1712a54b482696918a3ddb7294c16ce3676cb56c2928b721be12bcd390085e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f22b718ef3d2b16061b2c0fe48c8fe612b3e2b63baa2925fce7a1ad9552b090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:46Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.217790 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/e39e875a-bea7-4e27-af9a-f769a493efe7-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-kvc58\" (UID: \"e39e875a-bea7-4e27-af9a-f769a493efe7\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kvc58" Jan 
31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.231644 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g5kbw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a7cfe09-9892-494d-a420-5d720afb3df3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fb743faaf9717b716d96f7b36096ee11da25ac3eeeb601bdc6d8f20faf3a3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m86wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"1
92.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g5kbw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:46Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.237569 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6shzp\" (UniqueName: \"kubernetes.io/projected/e39e875a-bea7-4e27-af9a-f769a493efe7-kube-api-access-6shzp\") pod \"ovnkube-control-plane-749d76644c-kvc58\" (UID: \"e39e875a-bea7-4e27-af9a-f769a493efe7\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kvc58" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.247501 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:46Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.264704 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f9e971a-93ce-4a49-a970-a2789486d12c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f591a190eaf4b09cdb5961545cba8b3e469d2dd5b6b729627af238d6d8ebb410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rftqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:46Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.266014 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.266060 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.266073 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.266093 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.266105 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:46Z","lastTransitionTime":"2026-01-31T16:29:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.277360 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d352f75-43f7-4b8c-867e-cfb17bbbe011\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e57f0adc59ebcc50644648ee64395f6834328fec384b3d8c83b9758314ea18d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48035545fd929a672be1a83a941b13f4b352bdb858af6a412efec46dc7ac217\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-4bqbm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:46Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.294277 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86f2019b-d6ca-4e73-9dac-52fe746489cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuber
netes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67\\\",\\\"image
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e7b26b263ec2549303e59d543bdf302fea206a2ef416b96a1a56218edc92774\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://790e576890fdac9000118d5f35a9035f93d169a103e1070a426a862fe69f0c45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-31T16:29:43Z\\\",\\\"message\\\":\\\"v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0131 16:29:43.190451 6061 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0131 16:29:43.190564 6061 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0131 16:29:43.190546 6061 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0131 16:29:43.190751 6061 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0131 16:29:43.190814 6061 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0131 16:29:43.190934 6061 factory.go:656] Stopping watch factory\\\\nI0131 16:29:43.190991 6061 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0131 16:29:43.191080 6061 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0131 16:29:43.191306 6061 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0131 16:29:43.191358 6061 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0131 16:29:43.190744 6061 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0131 16:29:43.191436 6061 handler.go:208] Removed *v1.NetworkPolicy 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:40Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e7b26b263ec2549303e59d543bdf302fea206a2ef416b96a1a56218edc92774\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-31T16:29:45Z\\\",\\\"message\\\":\\\"ift-operator-lifecycle-manager/catalog-operator-metrics template LB for network=default: []services.LB{}\\\\nI0131 16:29:45.104699 6213 services_controller.go:454] Service openshift-operator-lifecycle-manager/catalog-operator-metrics for network=default has 1 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nI0131 16:29:45.104688 6213 services_controller.go:451] Built service openshift-marketplace/redhat-operators cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-marketplace/redhat-operators_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-marketplace/redhat-operators\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.138\\\\\\\", Port:50051, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF0131 16:29:45.104744 6213 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: failed to add event handler: handler {0x1e60340 
0x1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2
099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2r9tc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:46Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.316713 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21fa2db2-f448-487d-9ddb-ba4da28e8ffa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0131 16:29:26.255718 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0131 16:29:26.257930 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3522701725/tls.crt::/tmp/serving-cert-3522701725/tls.key\\\\\\\"\\\\nI0131 16:29:32.222719 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0131 16:29:32.227365 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0131 16:29:32.227391 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0131 16:29:32.227414 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0131 16:29:32.227419 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0131 16:29:32.234321 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0131 16:29:32.234340 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234349 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0131 16:29:32.234352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0131 16:29:32.234355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0131 16:29:32.234357 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0131 16:29:32.234523 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0131 16:29:32.237094 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:46Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.335825 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79dca88fae791047a9091e1fcad57d33d0061bbb52395ed325e2658e5fd6ffc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:46Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.354911 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:46Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.363884 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kvc58" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.368577 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.368684 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.368743 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.368787 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.368849 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:46Z","lastTransitionTime":"2026-01-31T16:29:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.370229 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef20199c8b7874b9d34bc38aed75d3d0dc669db3dfaff3218a1a4e249330daca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:46Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:46 crc kubenswrapper[4769]: W0131 16:29:46.384394 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode39e875a_bea7_4e27_af9a_f769a493efe7.slice/crio-c274a019edd7d792b2f42053bcb00460db415e837a1a71c08773bb66a47fc27b WatchSource:0}: Error finding container c274a019edd7d792b2f42053bcb00460db415e837a1a71c08773bb66a47fc27b: Status 404 returned error can't find the container with id c274a019edd7d792b2f42053bcb00460db415e837a1a71c08773bb66a47fc27b Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.391082 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lw4fx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"97c97bcd-bd44-4fdf-a90a-2d6be88c23e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f603aa4ef9fa0a10f90946e243cd0d25dcd6dba58c86a4378085addf26d95233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6q9l7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lw4fx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:46Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.416529 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:46Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.437790 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e5e34357e35a0bda20351c442465d7866c927dfad1a6c8acf1dc32b52a326ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb37e108557965f63da49a443c6667212f8ab8eb0a7099a0a106c45e00e78e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io
/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:46Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.450845 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-slrbh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"48d46c05-78b8-4355-9027-77efbbfbe87c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c68712921d6f140f1028a42cadcda30d38e3a9772ad3ca53e6d7b3b039433b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hqcpx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-slrbh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:46Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.469365 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb8dd1c1-8ad0-4df1-9eb7-f7e36509abac\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8f8d35bedc1d437cf45b14a416b9e5ed1610d7ecd30603f9a179406839220a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9c6bf8a15857982b68a045b8ca1d407a63ad3da192375596f26389d9983cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1712a54b482696918a3ddb7294c16ce3676cb56c2928b721be12bcd390085e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f22b718ef3d2b16061b2c0fe48c8fe612b3e2b63baa2925fce7a1ad9552b090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:46Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.471402 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.471442 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.471457 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.471477 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.471496 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:46Z","lastTransitionTime":"2026-01-31T16:29:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.486875 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g5kbw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a7cfe09-9892-494d-a420-5d720afb3df3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fb743faaf9717b716d96f7b36096ee11da25ac3eeeb601bdc6d8f20faf3a3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m86wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g5kbw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:46Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.503769 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kvc58" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e39e875a-bea7-4e27-af9a-f769a493efe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6shzp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6shzp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kvc58\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:46Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.574648 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.574706 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.574719 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.574741 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.574758 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:46Z","lastTransitionTime":"2026-01-31T16:29:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.663079 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-13 08:06:28.162345076 +0000 UTC Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.677583 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.677619 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.677628 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.677644 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.677654 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:46Z","lastTransitionTime":"2026-01-31T16:29:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.707763 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.707783 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:29:46 crc kubenswrapper[4769]: E0131 16:29:46.707871 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:29:46 crc kubenswrapper[4769]: E0131 16:29:46.708062 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.708245 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:29:46 crc kubenswrapper[4769]: E0131 16:29:46.708350 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.779909 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.779947 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.779961 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.779976 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.779986 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:46Z","lastTransitionTime":"2026-01-31T16:29:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.920234 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.920788 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.920802 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.920823 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.920835 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:46Z","lastTransitionTime":"2026-01-31T16:29:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.987642 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2r9tc_86f2019b-d6ca-4e73-9dac-52fe746489cb/ovnkube-controller/1.log" Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.996385 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kvc58" event={"ID":"e39e875a-bea7-4e27-af9a-f769a493efe7","Type":"ContainerStarted","Data":"0a326f3c1d78ff42444724a0595281be656bfe3f92ff5716a089e7a6c6828d34"} Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.996443 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kvc58" event={"ID":"e39e875a-bea7-4e27-af9a-f769a493efe7","Type":"ContainerStarted","Data":"c9c0d071a489b7537159a33cfb524a36c03fb2b7dbb0bff1c4d966c26aa4a503"} Jan 31 16:29:46 crc kubenswrapper[4769]: I0131 16:29:46.996467 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kvc58" event={"ID":"e39e875a-bea7-4e27-af9a-f769a493efe7","Type":"ContainerStarted","Data":"c274a019edd7d792b2f42053bcb00460db415e837a1a71c08773bb66a47fc27b"} Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.009190 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb8dd1c1-8ad0-4df1-9eb7-f7e36509abac\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8f8d35bedc1d437cf45b14a416b9e5ed1610d7ecd30603f9a179406839220a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9c6bf8a15857982b68a045b8ca1d407a63ad3da192375596f26389d9983cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243
b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1712a54b482696918a3ddb7294c16ce3676cb56c2928b721be12bcd390085e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f22b718ef3d2b16061b2c0fe48c8fe612b3e2b63baa2925fce7a1ad9552b090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:47Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.020065 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g5kbw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a7cfe09-9892-494d-a420-5d720afb3df3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fb743faaf9717b716d96f7b36096ee11da25ac3eeeb601bdc6d8f20faf3a3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m86wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g5kbw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:47Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.023427 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.023463 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.023479 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.023505 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.023518 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:47Z","lastTransitionTime":"2026-01-31T16:29:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.030340 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kvc58" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e39e875a-bea7-4e27-af9a-f769a493efe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c9c0d071a489b7537159a33cfb524a36c03fb2b7dbb0bff1c4d966c26aa4a503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6shzp\\\",\\\"readOnly\\\":t
rue,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a326f3c1d78ff42444724a0595281be656bfe3f92ff5716a089e7a6c6828d34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6shzp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kvc58\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:47Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.041697 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:47Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.054421 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f9e971a-93ce-4a49-a970-a2789486d12c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f591a190eaf4b09cdb5961545cba8b3e469d2dd5b6b729627af238d6d8ebb410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rftqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:47Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.065117 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d352f75-43f7-4b8c-867e-cfb17bbbe011\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e57f0adc59ebcc50644648ee64395f6834328fec384b3d8c83b9758314ea18d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48035545fd929a672be1a83a941b13f4b352bdb858af6a412efec46dc7ac217\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-4bqbm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:47Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.091382 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86f2019b-d6ca-4e73-9dac-52fe746489cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e7b26b263ec2549303e59d543bdf302fea206a2
ef416b96a1a56218edc92774\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://790e576890fdac9000118d5f35a9035f93d169a103e1070a426a862fe69f0c45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-31T16:29:43Z\\\",\\\"message\\\":\\\"v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0131 16:29:43.190451 6061 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0131 16:29:43.190564 6061 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0131 16:29:43.190546 6061 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0131 16:29:43.190751 6061 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0131 16:29:43.190814 6061 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0131 16:29:43.190934 6061 factory.go:656] Stopping watch factory\\\\nI0131 16:29:43.190991 6061 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0131 16:29:43.191080 6061 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0131 16:29:43.191306 6061 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0131 16:29:43.191358 6061 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0131 16:29:43.190744 6061 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0131 16:29:43.191436 6061 handler.go:208] Removed *v1.NetworkPolicy ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:40Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e7b26b263ec2549303e59d543bdf302fea206a2ef416b96a1a56218edc92774\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-31T16:29:45Z\\\",\\\"message\\\":\\\"ift-operator-lifecycle-manager/catalog-operator-metrics template LB for network=default: []services.LB{}\\\\nI0131 16:29:45.104699 6213 services_controller.go:454] Service openshift-operator-lifecycle-manager/catalog-operator-metrics for network=default has 1 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nI0131 16:29:45.104688 6213 services_controller.go:451] Built service openshift-marketplace/redhat-operators cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-marketplace/redhat-operators_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-marketplace/redhat-operators\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.138\\\\\\\", Port:50051, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), 
Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF0131 16:29:45.104744 6213 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: failed to add event handler: handler {0x1e60340 0x1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.16
8.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2r9tc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:47Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.102743 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef20199c8b7874b9d34bc38aed75d3d0dc669db3dfaff3218a1a4e249330daca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:47Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.111809 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lw4fx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"97c97bcd-bd44-4fdf-a90a-2d6be88c23e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f603aa4ef9fa0a10f90946e243cd0d25dcd6dba58c86a4378085addf26d95233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6q9l7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lw4fx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:47Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.125925 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.126071 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.126148 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.126499 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.126584 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:47Z","lastTransitionTime":"2026-01-31T16:29:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.131827 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21fa2db2-f448-487d-9ddb-ba4da28e8ffa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f
7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0131 16:29:26.255718 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0131 16:29:26.257930 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3522701725/tls.crt::/tmp/serving-cert-3522701725/tls.key\\\\\\\"\\\\nI0131 16:29:32.222719 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0131 16:29:32.227365 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0131 16:29:32.227391 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0131 16:29:32.227414 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0131 16:29:32.227419 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0131 16:29:32.234321 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0131 16:29:32.234340 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234349 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0131 16:29:32.234352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0131 16:29:32.234355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0131 16:29:32.234357 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0131 16:29:32.234523 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0131 16:29:32.237094 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:47Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.142540 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79dca88fae791047a9091e1fcad57d33d0061bbb52395ed325e2658e5fd6ffc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:47Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.160078 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:47Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.171457 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:47Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.186743 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e5e34357e35a0bda20351c442465d7866c927dfad1a6c8acf1dc32b52a326ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb37e108557965f63da49a443c6667212f8ab8eb0a7099a0a106c45e00e78e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:47Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.195755 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-slrbh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"48d46c05-78b8-4355-9027-77efbbfbe87c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c68712921d6f140f1028a42cadcda30d38e3a9772ad3ca53e6d7b3b039433b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hqcpx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-slrbh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:47Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.229407 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 
16:29:47.229646 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.229732 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.229797 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.229863 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:47Z","lastTransitionTime":"2026-01-31T16:29:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.332930 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.333023 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.333045 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.333070 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.333089 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:47Z","lastTransitionTime":"2026-01-31T16:29:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.435759 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.435815 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.435833 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.435856 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.435875 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:47Z","lastTransitionTime":"2026-01-31T16:29:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.539256 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.539299 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.539310 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.539325 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.539334 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:47Z","lastTransitionTime":"2026-01-31T16:29:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.569711 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-bl9cd"] Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.570828 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:29:47 crc kubenswrapper[4769]: E0131 16:29:47.570963 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.617646 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86f2019b-d6ca-4e73-9dac-52fe746489cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"
recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257
453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e7b26b263ec2549303e59d543bdf302fea206a2ef416b96a1a56218edc92774\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://790e576890fdac9000118d5f35a9035f93d169a103e1070a426a862fe69f0c45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-31T16:29:43Z\\\",\\\"message\\\":\\\"v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0131 16:29:43.190451 6061 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0131 16:29:43.190564 6061 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0131 16:29:43.190546 6061 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0131 16:29:43.190751 6061 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0131 16:29:43.190814 6061 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0131 16:29:43.190934 6061 factory.go:656] Stopping watch factory\\\\nI0131 16:29:43.190991 6061 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0131 16:29:43.191080 6061 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0131 16:29:43.191306 6061 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0131 16:29:43.191358 6061 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0131 16:29:43.190744 6061 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0131 16:29:43.191436 6061 handler.go:208] Removed *v1.NetworkPolicy 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:40Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e7b26b263ec2549303e59d543bdf302fea206a2ef416b96a1a56218edc92774\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-31T16:29:45Z\\\",\\\"message\\\":\\\"ift-operator-lifecycle-manager/catalog-operator-metrics template LB for network=default: []services.LB{}\\\\nI0131 16:29:45.104699 6213 services_controller.go:454] Service openshift-operator-lifecycle-manager/catalog-operator-metrics for network=default has 1 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nI0131 16:29:45.104688 6213 services_controller.go:451] Built service openshift-marketplace/redhat-operators cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-marketplace/redhat-operators_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-marketplace/redhat-operators\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.138\\\\\\\", Port:50051, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF0131 16:29:45.104744 6213 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: failed to add event handler: handler {0x1e60340 
0x1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2
099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2r9tc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:47Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.628132 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xzcbr\" (UniqueName: \"kubernetes.io/projected/428b0729-22d7-4feb-a392-1ec77e5acbc0-kube-api-access-xzcbr\") pod \"network-metrics-daemon-bl9cd\" (UID: \"428b0729-22d7-4feb-a392-1ec77e5acbc0\") " pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.628428 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/428b0729-22d7-4feb-a392-1ec77e5acbc0-metrics-certs\") pod \"network-metrics-daemon-bl9cd\" (UID: \"428b0729-22d7-4feb-a392-1ec77e5acbc0\") " pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.640275 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bl9cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"428b0729-22d7-4feb-a392-1ec77e5acbc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzcbr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzcbr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:47Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bl9cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:47Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.642624 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.642792 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.642908 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.643034 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.643139 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:47Z","lastTransitionTime":"2026-01-31T16:29:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.663921 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-01 11:11:29.742889835 +0000 UTC Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.668862 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:47Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.685824 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f9e971a-93ce-4a49-a970-a2789486d12c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f591a190eaf4b09cdb5961545cba8b3e469d2dd5b6b729627af238d6d8ebb410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rftqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:47Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.699358 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d352f75-43f7-4b8c-867e-cfb17bbbe011\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e57f0adc59ebcc50644648ee64395f6834328fec384b3d8c83b9758314ea18d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48035545fd929a672be1a83a941b13f4b352bdb858af6a412efec46dc7ac217\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-4bqbm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:47Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.713797 4769 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:47Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.725182 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef20199c8b7874b9d34bc38aed75d3d0dc669db3dfaff3218a1a4e249330daca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:47Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.729315 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xzcbr\" (UniqueName: \"kubernetes.io/projected/428b0729-22d7-4feb-a392-1ec77e5acbc0-kube-api-access-xzcbr\") pod \"network-metrics-daemon-bl9cd\" (UID: \"428b0729-22d7-4feb-a392-1ec77e5acbc0\") " pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.729352 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/428b0729-22d7-4feb-a392-1ec77e5acbc0-metrics-certs\") pod \"network-metrics-daemon-bl9cd\" (UID: \"428b0729-22d7-4feb-a392-1ec77e5acbc0\") " pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:29:47 crc kubenswrapper[4769]: E0131 16:29:47.729439 4769 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 31 16:29:47 crc kubenswrapper[4769]: E0131 16:29:47.729485 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/428b0729-22d7-4feb-a392-1ec77e5acbc0-metrics-certs podName:428b0729-22d7-4feb-a392-1ec77e5acbc0 nodeName:}" failed. No retries permitted until 2026-01-31 16:29:48.229471273 +0000 UTC m=+36.303639942 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/428b0729-22d7-4feb-a392-1ec77e5acbc0-metrics-certs") pod "network-metrics-daemon-bl9cd" (UID: "428b0729-22d7-4feb-a392-1ec77e5acbc0") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.735243 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lw4fx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"97c97bcd-bd44-4fdf-a90a-2d6be88c23e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f603aa4ef9fa0a10f90946e243cd0d25dcd6dba58c86a4378085addf26d95233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6q9l7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lw4fx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:47Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.745760 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.745801 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.745817 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 
16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.745836 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.745849 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:47Z","lastTransitionTime":"2026-01-31T16:29:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.749462 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21fa2db2-f448-487d-9ddb-ba4da28e8ffa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMoun
ts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0131 16:29:26.255718 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0131 16:29:26.257930 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3522701725/tls.crt::/tmp/serving-cert-3522701725/tls.key\\\\\\\"\\\\nI0131 16:29:32.222719 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0131 16:29:32.227365 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0131 16:29:32.227391 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0131 16:29:32.227414 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0131 16:29:32.227419 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0131 16:29:32.234321 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0131 16:29:32.234340 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234349 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0131 16:29:32.234352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0131 16:29:32.234355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0131 16:29:32.234357 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0131 16:29:32.234523 1 genericapiserver.go:533] 
MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0131 16:29:32.237094 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:47Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.750632 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xzcbr\" (UniqueName: \"kubernetes.io/projected/428b0729-22d7-4feb-a392-1ec77e5acbc0-kube-api-access-xzcbr\") pod \"network-metrics-daemon-bl9cd\" (UID: \"428b0729-22d7-4feb-a392-1ec77e5acbc0\") " pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.764087 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79dca88fae791047a9091e1fcad57d33d0061bbb52395ed325e2658e5fd6ffc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:47Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.777789 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-slrbh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"48d46c05-78b8-4355-9027-77efbbfbe87c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c68712921d6f140f1028a42cadcda30d38e3a9772ad3ca53e6d7b3b039433b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hqcpx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-slrbh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:47Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.794806 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:47Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.812542 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e5e34357e35a0bda20351c442465d7866c927dfad1a6c8acf1dc32b52a326ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb37e108557965f63da49a443c6667212f8ab8eb0a7099a0a106c45e00e78e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io
/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:47Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.827968 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kvc58" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e39e875a-bea7-4e27-af9a-f769a493efe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c9c0d071a489b7537159a33cfb524a36c03fb2b7dbb0bff1c4d966c26aa4a503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6shzp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a326f3c1d78ff42444724a0595281be656bfe3f92ff5716a089e7a6c6828d34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac
2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6shzp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kvc58\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:47Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.845336 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb8dd1c1-8ad0-4df1-9eb7-f7e36509abac\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8f8d35bedc1d437cf45b14a416b9e5ed1610d7ecd30603f9a179406839220a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9c6bf8a15857982b68a045b8ca1d407a63ad3da192375596f26389d9983cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\
":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1712a54b482696918a3ddb7294c16ce3676cb56c2928b721be12bcd390085e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f22b718ef3d2b16061b2c0fe48c8fe612b3e2b63baa2925fce7a1ad9552b090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:47Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.848105 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.848170 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.848188 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.848212 4769 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.848228 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:47Z","lastTransitionTime":"2026-01-31T16:29:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.865384 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g5kbw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a7cfe09-9892-494d-a420-5d720afb3df3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fb743faaf9717b716d96f7b36096ee11da25ac3eeeb601bdc6d8f20faf3a3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\
\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m86wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g5kbw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:47Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.951605 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.951644 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.951655 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.951670 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:47 crc kubenswrapper[4769]: I0131 16:29:47.951680 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:47Z","lastTransitionTime":"2026-01-31T16:29:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.055139 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.055222 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.055247 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.055275 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.055294 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:48Z","lastTransitionTime":"2026-01-31T16:29:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.158123 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.158181 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.158193 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.158208 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.158220 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:48Z","lastTransitionTime":"2026-01-31T16:29:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.235072 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/428b0729-22d7-4feb-a392-1ec77e5acbc0-metrics-certs\") pod \"network-metrics-daemon-bl9cd\" (UID: \"428b0729-22d7-4feb-a392-1ec77e5acbc0\") " pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:29:48 crc kubenswrapper[4769]: E0131 16:29:48.235280 4769 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 31 16:29:48 crc kubenswrapper[4769]: E0131 16:29:48.235401 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/428b0729-22d7-4feb-a392-1ec77e5acbc0-metrics-certs podName:428b0729-22d7-4feb-a392-1ec77e5acbc0 nodeName:}" failed. No retries permitted until 2026-01-31 16:29:49.235373702 +0000 UTC m=+37.309542401 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/428b0729-22d7-4feb-a392-1ec77e5acbc0-metrics-certs") pod "network-metrics-daemon-bl9cd" (UID: "428b0729-22d7-4feb-a392-1ec77e5acbc0") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.261704 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.261768 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.261787 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.261812 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.261829 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:48Z","lastTransitionTime":"2026-01-31T16:29:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.364726 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.364770 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.364782 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.364801 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.364815 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:48Z","lastTransitionTime":"2026-01-31T16:29:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.438051 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.438206 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:29:48 crc kubenswrapper[4769]: E0131 16:29:48.438319 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:30:04.438282129 +0000 UTC m=+52.512450838 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:29:48 crc kubenswrapper[4769]: E0131 16:29:48.438371 4769 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.438447 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:29:48 crc kubenswrapper[4769]: E0131 16:29:48.438459 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-31 16:30:04.438435273 +0000 UTC m=+52.512603982 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 31 16:29:48 crc kubenswrapper[4769]: E0131 16:29:48.438665 4769 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 31 16:29:48 crc kubenswrapper[4769]: E0131 16:29:48.438768 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-31 16:30:04.438746662 +0000 UTC m=+52.512915351 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.468486 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.468570 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.468591 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.468620 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.468640 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:48Z","lastTransitionTime":"2026-01-31T16:29:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.540644 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.540861 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:29:48 crc kubenswrapper[4769]: E0131 16:29:48.541149 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 31 16:29:48 crc kubenswrapper[4769]: E0131 16:29:48.541194 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 31 16:29:48 crc kubenswrapper[4769]: E0131 16:29:48.541222 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 31 16:29:48 crc kubenswrapper[4769]: E0131 16:29:48.541255 4769 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 31 16:29:48 crc kubenswrapper[4769]: E0131 16:29:48.541303 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 31 16:29:48 crc kubenswrapper[4769]: E0131 16:29:48.541352 4769 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 31 16:29:48 crc kubenswrapper[4769]: E0131 16:29:48.541404 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-31 16:30:04.541373719 +0000 UTC m=+52.615542428 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 31 16:29:48 crc kubenswrapper[4769]: E0131 16:29:48.541461 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-31 16:30:04.54142451 +0000 UTC m=+52.615593219 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.572363 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.572448 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.572470 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.572540 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.572568 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:48Z","lastTransitionTime":"2026-01-31T16:29:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.665121 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-13 18:34:07.403522234 +0000 UTC Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.675625 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.675676 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.675690 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.675710 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.675725 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:48Z","lastTransitionTime":"2026-01-31T16:29:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.707170 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:29:48 crc kubenswrapper[4769]: E0131 16:29:48.707337 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.707410 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.707480 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:29:48 crc kubenswrapper[4769]: E0131 16:29:48.707574 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:29:48 crc kubenswrapper[4769]: E0131 16:29:48.707766 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.707882 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:29:48 crc kubenswrapper[4769]: E0131 16:29:48.708057 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.778510 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.778595 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.778614 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.778636 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.778653 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:48Z","lastTransitionTime":"2026-01-31T16:29:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.881164 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.881210 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.881219 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.881234 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.881245 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:48Z","lastTransitionTime":"2026-01-31T16:29:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.901060 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.901110 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.901122 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.901145 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.901155 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:48Z","lastTransitionTime":"2026-01-31T16:29:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:48 crc kubenswrapper[4769]: E0131 16:29:48.921447 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a4704f7-ede0-4833-ba79-415de5d798cc\\\",\\\"systemUUID\\\":\\\"e3275d1e-5ae6-4e54-b0fa-71e35cbe4ac0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:48Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.925735 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.925778 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.925799 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.925823 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.925839 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:48Z","lastTransitionTime":"2026-01-31T16:29:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:48 crc kubenswrapper[4769]: E0131 16:29:48.943227 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a4704f7-ede0-4833-ba79-415de5d798cc\\\",\\\"systemUUID\\\":\\\"e3275d1e-5ae6-4e54-b0fa-71e35cbe4ac0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:48Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.947192 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.947247 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.947270 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.947294 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.947311 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:48Z","lastTransitionTime":"2026-01-31T16:29:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:48 crc kubenswrapper[4769]: E0131 16:29:48.968418 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a4704f7-ede0-4833-ba79-415de5d798cc\\\",\\\"systemUUID\\\":\\\"e3275d1e-5ae6-4e54-b0fa-71e35cbe4ac0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:48Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.972606 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.972648 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.972659 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.972675 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.972687 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:48Z","lastTransitionTime":"2026-01-31T16:29:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:48 crc kubenswrapper[4769]: E0131 16:29:48.989583 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a4704f7-ede0-4833-ba79-415de5d798cc\\\",\\\"systemUUID\\\":\\\"e3275d1e-5ae6-4e54-b0fa-71e35cbe4ac0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:48Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.994328 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.994424 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.994443 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.994475 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:48 crc kubenswrapper[4769]: I0131 16:29:48.994525 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:48Z","lastTransitionTime":"2026-01-31T16:29:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:49 crc kubenswrapper[4769]: E0131 16:29:49.013316 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a4704f7-ede0-4833-ba79-415de5d798cc\\\",\\\"systemUUID\\\":\\\"e3275d1e-5ae6-4e54-b0fa-71e35cbe4ac0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:49Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:49 crc kubenswrapper[4769]: E0131 16:29:49.013457 4769 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.014964 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.015000 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.015010 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.015027 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.015038 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:49Z","lastTransitionTime":"2026-01-31T16:29:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.117923 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.117968 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.117976 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.117989 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.117999 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:49Z","lastTransitionTime":"2026-01-31T16:29:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.220108 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.220171 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.220181 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.220195 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.220203 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:49Z","lastTransitionTime":"2026-01-31T16:29:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.247950 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/428b0729-22d7-4feb-a392-1ec77e5acbc0-metrics-certs\") pod \"network-metrics-daemon-bl9cd\" (UID: \"428b0729-22d7-4feb-a392-1ec77e5acbc0\") " pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:29:49 crc kubenswrapper[4769]: E0131 16:29:49.248189 4769 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 31 16:29:49 crc kubenswrapper[4769]: E0131 16:29:49.248292 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/428b0729-22d7-4feb-a392-1ec77e5acbc0-metrics-certs podName:428b0729-22d7-4feb-a392-1ec77e5acbc0 nodeName:}" failed. No retries permitted until 2026-01-31 16:29:51.248268983 +0000 UTC m=+39.322437662 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/428b0729-22d7-4feb-a392-1ec77e5acbc0-metrics-certs") pod "network-metrics-daemon-bl9cd" (UID: "428b0729-22d7-4feb-a392-1ec77e5acbc0") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.323158 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.323275 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.323304 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.323337 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.323363 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:49Z","lastTransitionTime":"2026-01-31T16:29:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.427005 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.427073 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.427091 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.427116 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.427137 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:49Z","lastTransitionTime":"2026-01-31T16:29:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.530238 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.530286 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.530297 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.530312 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.530323 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:49Z","lastTransitionTime":"2026-01-31T16:29:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.633827 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.633893 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.633911 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.633937 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.633956 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:49Z","lastTransitionTime":"2026-01-31T16:29:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.666314 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-10 16:57:46.59848809 +0000 UTC Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.737332 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.737425 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.737446 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.737471 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.737488 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:49Z","lastTransitionTime":"2026-01-31T16:29:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.840926 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.841017 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.841041 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.841065 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.841084 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:49Z","lastTransitionTime":"2026-01-31T16:29:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.944989 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.945181 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.945206 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.945237 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:49 crc kubenswrapper[4769]: I0131 16:29:49.945320 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:49Z","lastTransitionTime":"2026-01-31T16:29:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.048964 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.049038 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.049059 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.049089 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.049110 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:50Z","lastTransitionTime":"2026-01-31T16:29:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.153166 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.153256 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.153274 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.153305 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.153326 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:50Z","lastTransitionTime":"2026-01-31T16:29:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.257409 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.257469 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.257486 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.257539 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.257557 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:50Z","lastTransitionTime":"2026-01-31T16:29:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.360833 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.360909 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.360934 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.360969 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.360992 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:50Z","lastTransitionTime":"2026-01-31T16:29:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.464417 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.464487 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.464533 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.464558 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.464575 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:50Z","lastTransitionTime":"2026-01-31T16:29:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.567402 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.567451 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.567465 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.567482 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.567497 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:50Z","lastTransitionTime":"2026-01-31T16:29:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.667242 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-07 09:00:48.454866804 +0000 UTC Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.670760 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.670805 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.670818 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.670834 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.670846 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:50Z","lastTransitionTime":"2026-01-31T16:29:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.708332 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.708370 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.708465 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.708416 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd"
Jan 31 16:29:50 crc kubenswrapper[4769]: E0131 16:29:50.708569 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 31 16:29:50 crc kubenswrapper[4769]: E0131 16:29:50.708759 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 31 16:29:50 crc kubenswrapper[4769]: E0131 16:29:50.708949 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 31 16:29:50 crc kubenswrapper[4769]: E0131 16:29:50.709129 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0"
Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.775138 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.775355 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.775392 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.775431 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.775471 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:50Z","lastTransitionTime":"2026-01-31T16:29:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.879585 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.879649 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.879666 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.879695 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.879713 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:50Z","lastTransitionTime":"2026-01-31T16:29:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.983067 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.983137 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.983155 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.983238 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 31 16:29:50 crc kubenswrapper[4769]: I0131 16:29:50.983269 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:50Z","lastTransitionTime":"2026-01-31T16:29:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.086549 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.086614 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.086631 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.086663 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.086681 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:51Z","lastTransitionTime":"2026-01-31T16:29:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.190010 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.190096 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.190120 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.190148 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.190170 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:51Z","lastTransitionTime":"2026-01-31T16:29:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.272389 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/428b0729-22d7-4feb-a392-1ec77e5acbc0-metrics-certs\") pod \"network-metrics-daemon-bl9cd\" (UID: \"428b0729-22d7-4feb-a392-1ec77e5acbc0\") " pod="openshift-multus/network-metrics-daemon-bl9cd"
Jan 31 16:29:51 crc kubenswrapper[4769]: E0131 16:29:51.272575 4769 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 31 16:29:51 crc kubenswrapper[4769]: E0131 16:29:51.272644 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/428b0729-22d7-4feb-a392-1ec77e5acbc0-metrics-certs podName:428b0729-22d7-4feb-a392-1ec77e5acbc0 nodeName:}" failed. No retries permitted until 2026-01-31 16:29:55.272626752 +0000 UTC m=+43.346795431 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/428b0729-22d7-4feb-a392-1ec77e5acbc0-metrics-certs") pod "network-metrics-daemon-bl9cd" (UID: "428b0729-22d7-4feb-a392-1ec77e5acbc0") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.293888 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.293952 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.293970 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.293994 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.294011 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:51Z","lastTransitionTime":"2026-01-31T16:29:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.397849 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.397901 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.397912 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.397933 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.397945 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:51Z","lastTransitionTime":"2026-01-31T16:29:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.500794 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.500844 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.500861 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.500885 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.500902 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:51Z","lastTransitionTime":"2026-01-31T16:29:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.604160 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.604250 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.604268 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.604294 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.604312 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:51Z","lastTransitionTime":"2026-01-31T16:29:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.654874 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.667655 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-17 21:46:27.681806297 +0000 UTC Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.676485 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79dca88fae791047a9091e1fcad57d33d0061bbb52395ed325e2658e5fd6ffc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:51Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.689995 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:51Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.706919 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef20199c8b7874b9d34bc38aed75d3d0dc669db3dfaff3218a1a4e249330daca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:51Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.707074 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.707226 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.707238 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.707274 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.707285 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:51Z","lastTransitionTime":"2026-01-31T16:29:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.720069 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lw4fx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"97c97bcd-bd44-4fdf-a90a-2d6be88c23e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f603aa4ef9fa0a10f90946e243cd0d25dcd6dba58c86a4378085addf26d95233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6q9l7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lw4fx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:51Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.751890 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"21fa2db2-f448-487d-9ddb-ba4da28e8ffa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0131 16:29:26.255718 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0131 16:29:26.257930 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3522701725/tls.crt::/tmp/serving-cert-3522701725/tls.key\\\\\\\"\\\\nI0131 16:29:32.222719 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0131 16:29:32.227365 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0131 16:29:32.227391 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0131 16:29:32.227414 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0131 16:29:32.227419 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0131 16:29:32.234321 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0131 16:29:32.234340 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234349 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0131 16:29:32.234352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0131 16:29:32.234355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0131 16:29:32.234357 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0131 16:29:32.234523 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0131 16:29:32.237094 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:51Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.770486 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:51Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.785766 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e5e34357e35a0bda20351c442465d7866c927dfad1a6c8acf1dc32b52a326ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb37e108557965f63da49a443c6667212f8ab8eb0a7099a0a106c45e00e78e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:51Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.798461 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-slrbh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"48d46c05-78b8-4355-9027-77efbbfbe87c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c68712921d6f140f1028a42cadcda30d38e3a9772ad3ca53e6d7b3b039433b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hqcpx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-slrbh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:51Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.810866 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.810920 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.810935 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.810956 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.810972 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:51Z","lastTransitionTime":"2026-01-31T16:29:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.815710 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb8dd1c1-8ad0-4df1-9eb7-f7e36509abac\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8f8d35bedc1d437cf45b14a416b9e5ed1610d7ecd30603f9a179406839220a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9c6bf8a15857982b68a045b8ca1d407a63ad3da192375596f26389d9983cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1712a54b482696918a3ddb7294c16ce3676cb56c2928b721be12bcd390085e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resour
ces\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f22b718ef3d2b16061b2c0fe48c8fe612b3e2b63baa2925fce7a1ad9552b090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:51Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.828861 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g5kbw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a7cfe09-9892-494d-a420-5d720afb3df3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fb743faaf9717b716d96f7b36096ee11da25ac3eeeb601bdc6d8f20faf3a3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m86wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g5kbw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:51Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.841561 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kvc58" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e39e875a-bea7-4e27-af9a-f769a493efe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c9c0d071a489b7537159a33cfb524a36c03fb2b7dbb0bff1c4d966c26aa4a503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6shzp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a326f3c1d78ff42444724a0595281be656bfe3f92ff5716a089e7a6c6828d34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6shzp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kvc58\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:51Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.858549 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f9e971a-93ce-4a49-a970-a2789486d12c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f591a190eaf4b09cdb5961545cba8b3e469d2dd5b6b729627af238d6d8ebb410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\"
:\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"read
y\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rftqz\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:51Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.877726 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d352f75-43f7-4b8c-867e-cfb17bbbe011\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e57f0adc59ebcc50644648ee64395f6834328fec384b3d8c83b9758314ea18d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48035545fd929a672be1a83a941b13f4b352bdb858af6a412efec46dc7ac217\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\
\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-4bqbm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:51Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.901088 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86f2019b-d6ca-4e73-9dac-52fe746489cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\
":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serv
iceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e7b26b263ec2549303e59d543bdf302fea206a2ef416b96a1a56218edc92774\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://790e576890fdac9000118d5f35a9035f93d169a103e1070a426a862fe69f0c45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-31T16:29:43Z\\\",\\\"message\\\":\\\"v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0131 16:29:43.190451 6061 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0131 16:29:43.190564 6061 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0131 16:29:43.190546 6061 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0131 16:29:43.190751 6061 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0131 16:29:43.190814 6061 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0131 16:29:43.190934 6061 factory.go:656] Stopping watch factory\\\\nI0131 16:29:43.190991 6061 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0131 16:29:43.191080 6061 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0131 16:29:43.191306 6061 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0131 16:29:43.191358 6061 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0131 16:29:43.190744 6061 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0131 16:29:43.191436 6061 
handler.go:208] Removed *v1.NetworkPolicy ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:40Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e7b26b263ec2549303e59d543bdf302fea206a2ef416b96a1a56218edc92774\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-31T16:29:45Z\\\",\\\"message\\\":\\\"ift-operator-lifecycle-manager/catalog-operator-metrics template LB for network=default: []services.LB{}\\\\nI0131 16:29:45.104699 6213 services_controller.go:454] Service openshift-operator-lifecycle-manager/catalog-operator-metrics for network=default has 1 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nI0131 16:29:45.104688 6213 services_controller.go:451] Built service openshift-marketplace/redhat-operators cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-marketplace/redhat-operators_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-marketplace/redhat-operators\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.138\\\\\\\", Port:50051, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF0131 16:29:45.104744 6213 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: failed to add event handler: handler {0x1e60340 
0x1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2
099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2r9tc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:51Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.914657 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.914713 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.914726 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.914746 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.914761 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:51Z","lastTransitionTime":"2026-01-31T16:29:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.925017 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bl9cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"428b0729-22d7-4feb-a392-1ec77e5acbc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzcbr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzcbr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:47Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bl9cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:51Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:51 crc kubenswrapper[4769]: I0131 16:29:51.942393 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:51Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.017551 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.017610 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.017632 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.017646 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.017654 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:52Z","lastTransitionTime":"2026-01-31T16:29:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.119699 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.119732 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.119741 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.119755 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.119765 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:52Z","lastTransitionTime":"2026-01-31T16:29:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.223655 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.223718 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.223730 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.223749 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.223763 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:52Z","lastTransitionTime":"2026-01-31T16:29:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.326798 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.326866 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.326883 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.326912 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.326935 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:52Z","lastTransitionTime":"2026-01-31T16:29:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.430235 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.430289 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.430308 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.430333 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.430358 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:52Z","lastTransitionTime":"2026-01-31T16:29:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.533936 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.533993 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.534009 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.534034 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.534052 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:52Z","lastTransitionTime":"2026-01-31T16:29:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.637473 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.637626 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.637647 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.637678 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.637706 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:52Z","lastTransitionTime":"2026-01-31T16:29:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.668602 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-30 02:02:49.128971889 +0000 UTC Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.707292 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.707292 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.707359 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:29:52 crc kubenswrapper[4769]: E0131 16:29:52.707469 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.707531 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:29:52 crc kubenswrapper[4769]: E0131 16:29:52.707643 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:29:52 crc kubenswrapper[4769]: E0131 16:29:52.707735 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:29:52 crc kubenswrapper[4769]: E0131 16:29:52.707823 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.720592 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb8dd1c1-8ad0-4df1-9eb7-f7e36509abac\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8f8d35bedc1d437cf45b14a416b9e5ed1610d7ecd30603f9a179406839220a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9c6bf8a15857982b68a045b8ca1d407a63ad3da192375596f26389d9983cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1712a54b482696918a3ddb7294c16ce3676cb56c2928b721be12bcd390085e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/k
ubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f22b718ef3d2b16061b2c0fe48c8fe612b3e2b63baa2925fce7a1ad9552b090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:52Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.733104 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g5kbw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a7cfe09-9892-494d-a420-5d720afb3df3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fb743faaf9717b716d96f7b36096ee11da25ac3eeeb601bdc6d8f20faf3a3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m86wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g5kbw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:52Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.740270 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.740303 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.740311 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.740328 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.740338 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:52Z","lastTransitionTime":"2026-01-31T16:29:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.745845 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kvc58" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e39e875a-bea7-4e27-af9a-f769a493efe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c9c0d071a489b7537159a33cfb524a36c03fb2b7dbb0bff1c4d966c26aa4a503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6shzp\\\",\\\"readOnly\\\":t
rue,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a326f3c1d78ff42444724a0595281be656bfe3f92ff5716a089e7a6c6828d34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6shzp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kvc58\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:52Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.761212 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:52Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.777813 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f9e971a-93ce-4a49-a970-a2789486d12c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f591a190eaf4b09cdb5961545cba8b3e469d2dd5b6b729627af238d6d8ebb410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rftqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:52Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.792403 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d352f75-43f7-4b8c-867e-cfb17bbbe011\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e57f0adc59ebcc50644648ee64395f6834328fec384b3d8c83b9758314ea18d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48035545fd929a672be1a83a941b13f4b352bdb858af6a412efec46dc7ac217\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-4bqbm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:52Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.822849 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86f2019b-d6ca-4e73-9dac-52fe746489cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e7b26b263ec2549303e59d543bdf302fea206a2
ef416b96a1a56218edc92774\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://790e576890fdac9000118d5f35a9035f93d169a103e1070a426a862fe69f0c45\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-31T16:29:43Z\\\",\\\"message\\\":\\\"v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0131 16:29:43.190451 6061 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0131 16:29:43.190564 6061 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0131 16:29:43.190546 6061 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0131 16:29:43.190751 6061 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0131 16:29:43.190814 6061 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0131 16:29:43.190934 6061 factory.go:656] Stopping watch factory\\\\nI0131 16:29:43.190991 6061 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0131 16:29:43.191080 6061 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0131 16:29:43.191306 6061 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0131 16:29:43.191358 6061 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0131 16:29:43.190744 6061 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0131 16:29:43.191436 6061 handler.go:208] Removed *v1.NetworkPolicy ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:40Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e7b26b263ec2549303e59d543bdf302fea206a2ef416b96a1a56218edc92774\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-31T16:29:45Z\\\",\\\"message\\\":\\\"ift-operator-lifecycle-manager/catalog-operator-metrics template LB for network=default: []services.LB{}\\\\nI0131 16:29:45.104699 6213 services_controller.go:454] Service openshift-operator-lifecycle-manager/catalog-operator-metrics for network=default has 1 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nI0131 16:29:45.104688 6213 services_controller.go:451] Built service openshift-marketplace/redhat-operators cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-marketplace/redhat-operators_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-marketplace/redhat-operators\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.138\\\\\\\", Port:50051, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), 
Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF0131 16:29:45.104744 6213 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: failed to add event handler: handler {0x1e60340 0x1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.16
8.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2r9tc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:52Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.835129 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bl9cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"428b0729-22d7-4feb-a392-1ec77e5acbc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzcbr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzcbr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:47Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bl9cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:52Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.842129 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.842161 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.842172 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.842190 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.842201 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:52Z","lastTransitionTime":"2026-01-31T16:29:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.853049 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21fa2db2-f448-487d-9ddb-ba4da28e8ffa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0131 16:29:26.255718 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0131 16:29:26.257930 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3522701725/tls.crt::/tmp/serving-cert-3522701725/tls.key\\\\\\\"\\\\nI0131 16:29:32.222719 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0131 16:29:32.227365 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0131 16:29:32.227391 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0131 16:29:32.227414 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0131 16:29:32.227419 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0131 16:29:32.234321 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0131 16:29:32.234340 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234349 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0131 16:29:32.234352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0131 16:29:32.234355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0131 16:29:32.234357 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0131 16:29:32.234523 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0131 16:29:32.237094 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:52Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.870159 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79dca88fae791047a9091e1fcad57d33d0061bbb52395ed325e2658e5fd6ffc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:52Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.881301 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:52Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.897207 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef20199c8b7874b9d34bc38aed75d3d0dc669db3dfaff3218a1a4e249330daca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:52Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.909175 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lw4fx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"97c97bcd-bd44-4fdf-a90a-2d6be88c23e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f603aa4ef9fa0a10f90946e243cd0d25dcd6dba58c86a4378085addf26d95233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6q9l7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lw4fx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:52Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.926008 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:52Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.941077 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e5e34357e35a0bda20351c442465d7866c927dfad1a6c8acf1dc32b52a326ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb37e108557965f63da49a443c6667212f8ab8eb0a7099a0a106c45e00e78e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io
/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:52Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.947552 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.947601 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.947621 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.947642 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.947657 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:52Z","lastTransitionTime":"2026-01-31T16:29:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:52 crc kubenswrapper[4769]: I0131 16:29:52.951741 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-slrbh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"48d46c05-78b8-4355-9027-77efbbfbe87c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c68712921d6f140f1028a42cadcda30d38e3a9772ad3ca53e6d7b3b039433b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hqcpx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-slrbh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:52Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.051255 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.051309 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.051324 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.051342 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.051354 4769 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:53Z","lastTransitionTime":"2026-01-31T16:29:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.154084 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.154161 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.154185 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.154217 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.154241 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:53Z","lastTransitionTime":"2026-01-31T16:29:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.256905 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.256976 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.257001 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.257032 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.257055 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:53Z","lastTransitionTime":"2026-01-31T16:29:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.360009 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.360070 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.360087 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.360109 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.360126 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:53Z","lastTransitionTime":"2026-01-31T16:29:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.463058 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.463122 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.463139 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.463163 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.463180 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:53Z","lastTransitionTime":"2026-01-31T16:29:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.565310 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.565377 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.565403 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.565431 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.565449 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:53Z","lastTransitionTime":"2026-01-31T16:29:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.668805 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.668871 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.668889 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.668914 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.668773 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-14 04:52:18.278202008 +0000 UTC Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.668931 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:53Z","lastTransitionTime":"2026-01-31T16:29:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.771960 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.772025 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.772043 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.772069 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.772086 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:53Z","lastTransitionTime":"2026-01-31T16:29:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.874818 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.874910 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.874930 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.874955 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.874973 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:53Z","lastTransitionTime":"2026-01-31T16:29:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.977486 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.977583 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.977604 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.977628 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:53 crc kubenswrapper[4769]: I0131 16:29:53.977646 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:53Z","lastTransitionTime":"2026-01-31T16:29:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.081229 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.081283 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.081303 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.081327 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.081344 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:54Z","lastTransitionTime":"2026-01-31T16:29:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.184165 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.184225 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.184241 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.184267 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.184284 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:54Z","lastTransitionTime":"2026-01-31T16:29:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.286983 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.287029 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.287041 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.287057 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.287070 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:54Z","lastTransitionTime":"2026-01-31T16:29:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.389549 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.389584 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.389592 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.389605 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.389614 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:54Z","lastTransitionTime":"2026-01-31T16:29:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.491961 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.492041 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.492067 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.492097 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.492120 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:54Z","lastTransitionTime":"2026-01-31T16:29:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.595014 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.595079 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.595093 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.595108 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.595119 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:54Z","lastTransitionTime":"2026-01-31T16:29:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.670034 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-06 00:33:51.231651012 +0000 UTC Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.697018 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.697102 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.697119 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.697136 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.697150 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:54Z","lastTransitionTime":"2026-01-31T16:29:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.707380 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.707416 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.707422 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.707415 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:29:54 crc kubenswrapper[4769]: E0131 16:29:54.708032 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:29:54 crc kubenswrapper[4769]: E0131 16:29:54.707822 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:29:54 crc kubenswrapper[4769]: E0131 16:29:54.708248 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:29:54 crc kubenswrapper[4769]: E0131 16:29:54.708484 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.799153 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.799215 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.799234 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.799260 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.799279 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:54Z","lastTransitionTime":"2026-01-31T16:29:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.902220 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.902250 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.902258 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.902271 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:54 crc kubenswrapper[4769]: I0131 16:29:54.902281 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:54Z","lastTransitionTime":"2026-01-31T16:29:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.004774 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.004818 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.004828 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.004843 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.004867 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:55Z","lastTransitionTime":"2026-01-31T16:29:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.107380 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.107426 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.107441 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.107460 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.107474 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:55Z","lastTransitionTime":"2026-01-31T16:29:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.210912 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.210983 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.211001 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.211026 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.211044 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:55Z","lastTransitionTime":"2026-01-31T16:29:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.313957 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.314002 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.314020 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.314045 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.314062 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:55Z","lastTransitionTime":"2026-01-31T16:29:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.322464 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/428b0729-22d7-4feb-a392-1ec77e5acbc0-metrics-certs\") pod \"network-metrics-daemon-bl9cd\" (UID: \"428b0729-22d7-4feb-a392-1ec77e5acbc0\") " pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:29:55 crc kubenswrapper[4769]: E0131 16:29:55.322745 4769 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 31 16:29:55 crc kubenswrapper[4769]: E0131 16:29:55.322873 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/428b0729-22d7-4feb-a392-1ec77e5acbc0-metrics-certs podName:428b0729-22d7-4feb-a392-1ec77e5acbc0 nodeName:}" failed. No retries permitted until 2026-01-31 16:30:03.322836192 +0000 UTC m=+51.397005051 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/428b0729-22d7-4feb-a392-1ec77e5acbc0-metrics-certs") pod "network-metrics-daemon-bl9cd" (UID: "428b0729-22d7-4feb-a392-1ec77e5acbc0") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.416178 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.416244 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.416261 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.416286 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.416302 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:55Z","lastTransitionTime":"2026-01-31T16:29:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.518381 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.518434 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.518443 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.518454 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.518464 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:55Z","lastTransitionTime":"2026-01-31T16:29:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.621739 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.621776 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.621785 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.621800 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.621810 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:55Z","lastTransitionTime":"2026-01-31T16:29:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.671042 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-06 20:09:07.845264241 +0000 UTC Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.724452 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.724484 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.724508 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.724520 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.724529 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:55Z","lastTransitionTime":"2026-01-31T16:29:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.827434 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.827549 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.827577 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.827605 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.827628 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:55Z","lastTransitionTime":"2026-01-31T16:29:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.930586 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.930636 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.930654 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.930677 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:55 crc kubenswrapper[4769]: I0131 16:29:55.930695 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:55Z","lastTransitionTime":"2026-01-31T16:29:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.033542 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.033605 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.033628 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.033661 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.033682 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:56Z","lastTransitionTime":"2026-01-31T16:29:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.136932 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.137010 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.137035 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.137064 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.137087 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:56Z","lastTransitionTime":"2026-01-31T16:29:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.240478 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.240576 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.240595 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.240620 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.240639 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:56Z","lastTransitionTime":"2026-01-31T16:29:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.343701 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.343762 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.343777 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.343798 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.343814 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:56Z","lastTransitionTime":"2026-01-31T16:29:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.446721 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.446789 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.446806 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.446837 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.446855 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:56Z","lastTransitionTime":"2026-01-31T16:29:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.549593 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.549657 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.549673 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.549697 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.549714 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:56Z","lastTransitionTime":"2026-01-31T16:29:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.653307 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.653363 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.653378 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.653398 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.653412 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:56Z","lastTransitionTime":"2026-01-31T16:29:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.672070 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-12 05:19:38.533856437 +0000 UTC Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.707593 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.707621 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:29:56 crc kubenswrapper[4769]: E0131 16:29:56.707846 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.707904 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:29:56 crc kubenswrapper[4769]: E0131 16:29:56.707955 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.707628 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:29:56 crc kubenswrapper[4769]: E0131 16:29:56.708104 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:29:56 crc kubenswrapper[4769]: E0131 16:29:56.708292 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.757082 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.757146 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.757163 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.757186 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.757206 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:56Z","lastTransitionTime":"2026-01-31T16:29:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.864732 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.864799 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.864816 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.864903 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.864925 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:56Z","lastTransitionTime":"2026-01-31T16:29:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.968268 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.968331 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.968349 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.968374 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:56 crc kubenswrapper[4769]: I0131 16:29:56.968391 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:56Z","lastTransitionTime":"2026-01-31T16:29:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.071455 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.071570 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.071588 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.071614 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.071633 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:57Z","lastTransitionTime":"2026-01-31T16:29:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.174163 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.174243 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.174256 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.174275 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.174312 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:57Z","lastTransitionTime":"2026-01-31T16:29:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.277102 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.277145 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.277154 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.277167 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.277177 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:57Z","lastTransitionTime":"2026-01-31T16:29:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.381060 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.381125 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.381141 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.381167 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.381185 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:57Z","lastTransitionTime":"2026-01-31T16:29:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.484352 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.484433 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.484457 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.484488 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.484541 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:57Z","lastTransitionTime":"2026-01-31T16:29:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.588169 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.588255 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.588273 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.588305 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.588328 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:57Z","lastTransitionTime":"2026-01-31T16:29:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.672702 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-27 19:58:05.201968984 +0000 UTC Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.692055 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.692126 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.692146 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.692177 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.692202 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:57Z","lastTransitionTime":"2026-01-31T16:29:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.708345 4769 scope.go:117] "RemoveContainer" containerID="3e7b26b263ec2549303e59d543bdf302fea206a2ef416b96a1a56218edc92774" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.729277 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef20199c8b7874b9d34bc38aed75d3d0dc669db3dfaff3218a1a4e249330daca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:57Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.747786 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lw4fx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"97c97bcd-bd44-4fdf-a90a-2d6be88c23e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f603aa4ef9fa0a10f90946e243cd0d25dcd6dba58c86a4378085addf26d95233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6q9l7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lw4fx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:57Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.769430 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"21fa2db2-f448-487d-9ddb-ba4da28e8ffa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0131 16:29:26.255718 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0131 16:29:26.257930 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3522701725/tls.crt::/tmp/serving-cert-3522701725/tls.key\\\\\\\"\\\\nI0131 16:29:32.222719 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0131 16:29:32.227365 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0131 16:29:32.227391 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0131 16:29:32.227414 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0131 16:29:32.227419 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0131 16:29:32.234321 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0131 16:29:32.234340 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234349 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0131 16:29:32.234352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0131 16:29:32.234355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0131 16:29:32.234357 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0131 16:29:32.234523 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0131 16:29:32.237094 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:57Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.790288 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79dca88fae791047a9091e1fcad57d33d0061bbb52395ed325e2658e5fd6ffc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:57Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.794270 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.794301 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.794348 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.794370 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.794443 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:57Z","lastTransitionTime":"2026-01-31T16:29:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.809047 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:57Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.829863 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:57Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.848859 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e5e34357e35a0bda20351c442465d7866c927dfad1a6c8acf1dc32b52a326ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb37e108557965f63da49a443c6667212f8ab8eb0a7099a0a106c45e00e78e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io
/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:57Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.864450 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-slrbh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"48d46c05-78b8-4355-9027-77efbbfbe87c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c68712921d6f140f1028a42cadcda30d38e3a9772ad3ca53e6d7b3b039433b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hqcpx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-slrbh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:57Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.885920 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb8dd1c1-8ad0-4df1-9eb7-f7e36509abac\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8f8d35bedc1d437cf45b14a416b9e5ed1610d7ecd30603f9a179406839220a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9c6bf8a15857982b68a045b8ca1d407a63ad3da192375596f26389d9983cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1712a54b482696918a3ddb7294c16ce3676cb56c2928b721be12bcd390085e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f22b718ef3d2b16061b2c0fe48c8fe612b3e2b63baa2925fce7a1ad9552b090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:57Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.897511 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.897569 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.897582 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.897605 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.897620 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:57Z","lastTransitionTime":"2026-01-31T16:29:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.904646 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g5kbw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a7cfe09-9892-494d-a420-5d720afb3df3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fb743faaf9717b716d96f7b36096ee11da25ac3eeeb601bdc6d8f20faf3a3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m86wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g5kbw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:57Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.922474 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kvc58" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e39e875a-bea7-4e27-af9a-f769a493efe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c9c0d071a489b7537159a33cfb524a36c03fb2b7dbb0bff1c4d966c26aa4a503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6shzp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a326f3c1d78ff42444724a0595281be656bfe3f92ff5716a089e7a6c6828d34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6shzp\\\",\\\"readOnly\\\":true,\\\"recursiveReadO
nly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kvc58\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:57Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.939409 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bl9cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"428b0729-22d7-4feb-a392-1ec77e5acbc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzcbr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzcbr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:47Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bl9cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:57Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.961069 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:57Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.985520 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f9e971a-93ce-4a49-a970-a2789486d12c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f591a190eaf4b09cdb5961545cba8b3e469d2dd5b6b729627af238d6d8ebb410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rftqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:57Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:57 crc kubenswrapper[4769]: I0131 16:29:57.997696 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d352f75-43f7-4b8c-867e-cfb17bbbe011\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e57f0adc59ebcc50644648ee64395f6834328fec384b3d8c83b9758314ea18d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48035545fd929a672be1a83a941b13f4b352bdb858af6a412efec46dc7ac217\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-4bqbm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:57Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.000101 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.000146 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.000158 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.000179 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.000192 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:58Z","lastTransitionTime":"2026-01-31T16:29:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.017998 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86f2019b-d6ca-4e73-9dac-52fe746489cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e7b26b263ec2549303e59d543bdf302fea206a2ef416b96a1a56218edc92774\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e7b26b263ec2549303e59d543bdf302fea206a2ef416b96a1a56218edc92774\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-31T16:29:45Z\\\",\\\"message\\\":\\\"ift-operator-lifecycle-manager/catalog-operator-metrics template LB for network=default: []services.LB{}\\\\nI0131 16:29:45.104699 6213 services_controller.go:454] Service openshift-operator-lifecycle-manager/catalog-operator-metrics for network=default has 1 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nI0131 16:29:45.104688 6213 services_controller.go:451] Built service openshift-marketplace/redhat-operators cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-marketplace/redhat-operators_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-marketplace/redhat-operators\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.138\\\\\\\", Port:50051, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF0131 16:29:45.104744 6213 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: failed to add event handler: handler {0x1e60340 
0x1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:44Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-2r9tc_openshift-ovn-kubernetes(86f2019b-d6ca-4e73-9dac-52fe746489cb)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursive
ReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2r9tc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:58Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.036663 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2r9tc_86f2019b-d6ca-4e73-9dac-52fe746489cb/ovnkube-controller/1.log" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.038557 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" event={"ID":"86f2019b-d6ca-4e73-9dac-52fe746489cb","Type":"ContainerStarted","Data":"4e0676e321ded7f968141e8b28596ec889f690e6b66a6b10151501f9f83a7372"} Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.038665 4769 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.050842 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kvc58" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e39e875a-bea7-4e27-af9a-f769a493efe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c9c0d071a489b7537159a33cfb524a36c03fb2b7dbb0bff1c4d966c26aa4a503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6shzp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a326f3c1d78ff42444724a0595281be656bfe3f92ff5716a089e7a6c6828d34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6shzp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kvc58\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:58Z is after 2025-08-24T17:21:41Z" Jan 31 
16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.062911 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb8dd1c1-8ad0-4df1-9eb7-f7e36509abac\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8f8d35bedc1d437cf45b14a416b9e5ed1610d7ecd30603f9a179406839220a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9c6bf8a15857982b68a045b8ca1d407a63ad3da192375596f26389d9983cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1712a54b482696918a3ddb7294c16ce3676cb56c2928b721be12bcd390085e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f22b718ef3d2b16061b2c0fe48c8fe612b3e2b63baa2925fce7a1ad9552b090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:58Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.076808 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g5kbw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a7cfe09-9892-494d-a420-5d720afb3df3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fb743faaf9717b716d96f7b36096ee11da25ac3eeeb601bdc6d8f20faf3a3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"sys
tem-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m86wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g5kbw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:58Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.097044 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86f2019b-d6ca-4e73-9dac-52fe746489cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0676e321ded7f968141e8b28596ec889f690e6
b66a6b10151501f9f83a7372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e7b26b263ec2549303e59d543bdf302fea206a2ef416b96a1a56218edc92774\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-31T16:29:45Z\\\",\\\"message\\\":\\\"ift-operator-lifecycle-manager/catalog-operator-metrics template LB for network=default: []services.LB{}\\\\nI0131 16:29:45.104699 6213 services_controller.go:454] Service openshift-operator-lifecycle-manager/catalog-operator-metrics for network=default has 1 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nI0131 16:29:45.104688 6213 services_controller.go:451] Built service openshift-marketplace/redhat-operators cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-marketplace/redhat-operators_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-marketplace/redhat-operators\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.138\\\\\\\", Port:50051, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF0131 16:29:45.104744 6213 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: failed to add event handler: handler {0x1e60340 
0x1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:44Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"co
ntainerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2r9tc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:58Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.102332 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.102369 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.102381 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.102398 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.102411 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:58Z","lastTransitionTime":"2026-01-31T16:29:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.112684 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bl9cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"428b0729-22d7-4feb-a392-1ec77e5acbc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzcbr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzcbr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:47Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bl9cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:58Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.126606 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:58Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.143644 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f9e971a-93ce-4a49-a970-a2789486d12c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f591a190eaf4b09cdb5961545cba8b3e469d2dd5b6b729627af238d6d8ebb410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rftqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:58Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.162882 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d352f75-43f7-4b8c-867e-cfb17bbbe011\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e57f0adc59ebcc50644648ee64395f6834328fec384b3d8c83b9758314ea18d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48035545fd929a672be1a83a941b13f4b352bdb858af6a412efec46dc7ac217\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-4bqbm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:58Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.181848 4769 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:58Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.202751 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef20199c8b7874b9d34bc38aed75d3d0dc669db3dfaff3218a1a4e249330daca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:58Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.204445 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.204533 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.204556 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.204584 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.204613 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:58Z","lastTransitionTime":"2026-01-31T16:29:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.222811 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lw4fx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"97c97bcd-bd44-4fdf-a90a-2d6be88c23e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f603aa4ef9fa0a10f90946e243cd0d25dcd6dba58c86a4378085addf26d95233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6q9l7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lw4fx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:58Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.239265 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"21fa2db2-f448-487d-9ddb-ba4da28e8ffa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0131 16:29:26.255718 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0131 16:29:26.257930 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3522701725/tls.crt::/tmp/serving-cert-3522701725/tls.key\\\\\\\"\\\\nI0131 16:29:32.222719 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0131 16:29:32.227365 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0131 16:29:32.227391 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0131 16:29:32.227414 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0131 16:29:32.227419 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0131 16:29:32.234321 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0131 16:29:32.234340 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234349 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0131 16:29:32.234352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0131 16:29:32.234355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0131 16:29:32.234357 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0131 16:29:32.234523 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0131 16:29:32.237094 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:58Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.251440 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79dca88fae791047a9091e1fcad57d33d0061bbb52395ed325e2658e5fd6ffc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:58Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.262434 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-slrbh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"48d46c05-78b8-4355-9027-77efbbfbe87c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c68712921d6f140f1028a42cadcda30d38e3a9772ad3ca53e6d7b3b039433b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hqcpx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-slrbh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:58Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.273242 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:58Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.290085 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e5e34357e35a0bda20351c442465d7866c927dfad1a6c8acf1dc32b52a326ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb37e108557965f63da49a443c6667212f8ab8eb0a7099a0a106c45e00e78e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io
/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:58Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.306796 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.306836 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.306845 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.306862 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.306873 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:58Z","lastTransitionTime":"2026-01-31T16:29:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.409161 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.409207 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.409220 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.409238 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.409251 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:58Z","lastTransitionTime":"2026-01-31T16:29:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.511331 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.511369 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.511377 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.511392 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.511401 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:58Z","lastTransitionTime":"2026-01-31T16:29:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.614786 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.614833 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.614849 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.614873 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.614890 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:58Z","lastTransitionTime":"2026-01-31T16:29:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.673301 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-09 10:41:24.287534157 +0000 UTC Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.710342 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:29:58 crc kubenswrapper[4769]: E0131 16:29:58.710456 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.710552 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:29:58 crc kubenswrapper[4769]: E0131 16:29:58.710616 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.710665 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:29:58 crc kubenswrapper[4769]: E0131 16:29:58.710715 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.710755 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:29:58 crc kubenswrapper[4769]: E0131 16:29:58.710801 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.718322 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.718371 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.718384 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.718405 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.718422 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:58Z","lastTransitionTime":"2026-01-31T16:29:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.821889 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.821936 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.821948 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.821970 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.821983 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:58Z","lastTransitionTime":"2026-01-31T16:29:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.924938 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.925008 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.925031 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.925058 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:58 crc kubenswrapper[4769]: I0131 16:29:58.925078 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:58Z","lastTransitionTime":"2026-01-31T16:29:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.028093 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.028139 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.028151 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.028167 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.028178 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:59Z","lastTransitionTime":"2026-01-31T16:29:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.044700 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2r9tc_86f2019b-d6ca-4e73-9dac-52fe746489cb/ovnkube-controller/2.log" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.045469 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2r9tc_86f2019b-d6ca-4e73-9dac-52fe746489cb/ovnkube-controller/1.log" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.049301 4769 generic.go:334] "Generic (PLEG): container finished" podID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerID="4e0676e321ded7f968141e8b28596ec889f690e6b66a6b10151501f9f83a7372" exitCode=1 Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.049348 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" event={"ID":"86f2019b-d6ca-4e73-9dac-52fe746489cb","Type":"ContainerDied","Data":"4e0676e321ded7f968141e8b28596ec889f690e6b66a6b10151501f9f83a7372"} Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.049397 4769 scope.go:117] "RemoveContainer" containerID="3e7b26b263ec2549303e59d543bdf302fea206a2ef416b96a1a56218edc92774" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.050599 4769 scope.go:117] "RemoveContainer" containerID="4e0676e321ded7f968141e8b28596ec889f690e6b66a6b10151501f9f83a7372" Jan 31 16:29:59 crc kubenswrapper[4769]: E0131 16:29:59.050863 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-2r9tc_openshift-ovn-kubernetes(86f2019b-d6ca-4e73-9dac-52fe746489cb)\"" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.081654 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g5kbw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a7cfe09-9892-494d-a420-5d720afb3df3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fb743faaf9717b716d96f7b36096ee11da25ac3eeeb601bdc6d8f20faf3a3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m86wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g5kbw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:59Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.100556 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kvc58" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e39e875a-bea7-4e27-af9a-f769a493efe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c9c0d071a489b7537159a33cfb524a36c03fb2b7dbb0bff1c4d966c26aa4a503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6shzp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a326f3c1d78ff42444724a0595281be656bfe3f92ff5716a089e7a6c6828d34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6shzp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kvc58\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:59Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.107859 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.108006 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.108028 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.108064 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.108088 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:59Z","lastTransitionTime":"2026-01-31T16:29:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.121634 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb8dd1c1-8ad0-4df1-9eb7-f7e36509abac\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8f8d35bedc1d437cf45b14a416b9e5ed1610d7ecd30603f9a179406839220a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9c6bf8a15857982b68a045b8ca1d407a63ad3da192375596f26389d9983cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1712a54b482696918a3ddb7294c16ce3676cb56c2928b721be12bcd390085e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f22b718ef3d2b16061b2c0fe48c8fe612b3e2b63baa2925fce7a1ad9552b090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:59Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:59 crc kubenswrapper[4769]: E0131 16:29:59.125416 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a4704f7-ede0-4833-ba79-415de5d798cc\\\",\\\"systemUUID\\\":\\\"e3275d1e-5ae6-4e54-b0fa-71e35cbe4ac0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:59Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.130931 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.130989 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.131007 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.131036 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.131055 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:59Z","lastTransitionTime":"2026-01-31T16:29:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.138861 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d352f75-43f7-4b8c-867e-cfb17bbbe011\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e57f0adc59ebcc50644648ee64395f6834328fec384b3d8c83b9758314ea18d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48035545fd929a672be1a83a941b13f4b352bdb858af6a412efec46dc7ac217\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running
\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-4bqbm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:59Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:59 crc kubenswrapper[4769]: E0131 16:29:59.148668 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a4704f7-ede0-4833-ba79-415de5d798cc\\\",\\\"systemUUID\\\":\\\"e3275d1e-5ae6-4e54-b0fa-71e35cbe4ac0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:59Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.154169 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.154263 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.154321 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.154346 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.154447 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:59Z","lastTransitionTime":"2026-01-31T16:29:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.158384 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86f2019b-d6ca-4e73-9dac-52fe746489cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0676e321ded7f968141e8b28596ec889f690e6
b66a6b10151501f9f83a7372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e7b26b263ec2549303e59d543bdf302fea206a2ef416b96a1a56218edc92774\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-31T16:29:45Z\\\",\\\"message\\\":\\\"ift-operator-lifecycle-manager/catalog-operator-metrics template LB for network=default: []services.LB{}\\\\nI0131 16:29:45.104699 6213 services_controller.go:454] Service openshift-operator-lifecycle-manager/catalog-operator-metrics for network=default has 1 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nI0131 16:29:45.104688 6213 services_controller.go:451] Built service openshift-marketplace/redhat-operators cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-marketplace/redhat-operators_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-marketplace/redhat-operators\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.138\\\\\\\", Port:50051, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF0131 16:29:45.104744 6213 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: failed to add event handler: handler {0x1e60340 0x1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:44Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e0676e321ded7f968141e8b28596ec889f690e6b66a6b10151501f9f83a7372\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-31T16:29:58Z\\\",\\\"message\\\":\\\"try object setup: *v1.Pod openshift-multus/multus-additional-cni-plugins-rftqz\\\\nI0131 16:29:58.731451 6426 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-node-2r9tc in node crc\\\\nI0131 16:29:58.731460 6426 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/multus-additional-cni-plugins-rftqz\\\\nI0131 16:29:58.731476 6426 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-additional-cni-plugins-rftqz in node crc\\\\nI0131 16:29:58.731468 6426 obj_retry.go:386] Retry successful for *v1.Pod openshift-ovn-kubernetes/ovnkube-node-2r9tc after 0 failed attempt(s)\\\\nI0131 16:29:58.731486 6426 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-additional-cni-plugins-rftqz after 0 failed attempt(s)\\\\nI0131 16:29:58.731490 6426 default_network_controller.go:776] Recording success event on pod openshift-ovn-kubernetes/ovnkube-node-2r9tc\\\\nF0131 16:29:58.731517 6426 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin 
network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: fail\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\"
,\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2r9tc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:59Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.175912 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bl9cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"428b0729-22d7-4feb-a392-1ec77e5acbc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzcbr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzcbr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:47Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bl9cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:59Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:59 crc kubenswrapper[4769]: E0131 16:29:59.178727 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056
b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951
},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a4704f7-ede0-4833-ba79-415de5d798cc\\\",\\\"systemUUID\\\":\\\"e3275d1e-5ae6-4e54-b0fa-71e35cbe4ac0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-01-31T16:29:59Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.187215 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.187276 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.187296 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.187321 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.187339 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:59Z","lastTransitionTime":"2026-01-31T16:29:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.192867 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:59Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:59 crc kubenswrapper[4769]: E0131 16:29:59.208341 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a4704f7-ede0-4833-ba79-415de5d798cc\\\",\\\"systemUUID\\\":\\\"e3275d1e-5ae6-4e54-b0fa-71e35cbe4ac0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:59Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.213430 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.213532 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.213556 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.213588 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.213609 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:59Z","lastTransitionTime":"2026-01-31T16:29:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.215607 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f9e971a-93ce-4a49-a970-a2789486d12c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f591a190eaf4b09cdb5961545cba8b3e469d2dd5b6b729627af238d6d8ebb410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":
\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac1
3cf80afe5f9023013b5398b30de5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernet
es.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rftqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:59Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.231237 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79dca88fae791047a9091e1fcad57d33d0061bbb52395ed325e2658e5fd6ffc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:59Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:59 crc kubenswrapper[4769]: E0131 16:29:59.235601 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae
669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a4704f7-ede0-4833-ba79-415de5d798cc\\\",\\\"systemUUID\\\":\\\"e3275d1e-5ae6-4e54-b0fa-71e35cbe4ac0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:59Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:59 crc kubenswrapper[4769]: E0131 16:29:59.235958 4769 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.238081 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.238161 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.238183 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.238211 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.238235 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:59Z","lastTransitionTime":"2026-01-31T16:29:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.252530 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:59Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.269274 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef20199c8b7874b9d34bc38aed75d3d0dc669db3dfaff3218a1a4e249330daca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:59Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.282922 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lw4fx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"97c97bcd-bd44-4fdf-a90a-2d6be88c23e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f603aa4ef9fa0a10f90946e243cd0d25dcd6dba58c86a4378085addf26d95233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6q9l7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lw4fx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:59Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.297726 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"21fa2db2-f448-487d-9ddb-ba4da28e8ffa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0131 16:29:26.255718 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0131 16:29:26.257930 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3522701725/tls.crt::/tmp/serving-cert-3522701725/tls.key\\\\\\\"\\\\nI0131 16:29:32.222719 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0131 16:29:32.227365 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0131 16:29:32.227391 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0131 16:29:32.227414 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0131 16:29:32.227419 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0131 16:29:32.234321 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0131 16:29:32.234340 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234349 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0131 16:29:32.234352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0131 16:29:32.234355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0131 16:29:32.234357 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0131 16:29:32.234523 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0131 16:29:32.237094 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:59Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.314810 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e5e34357e35a0bda20351c442465d7866c927dfad1a6c8acf1dc32b52a326ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb37e108557965f63da49a443c6667212f8ab8eb0a7099a0a106c45e00e78e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:59Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.329335 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-slrbh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"48d46c05-78b8-4355-9027-77efbbfbe87c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c68712921d6f140f1028a42cadcda30d38e3a9772ad3ca53e6d7b3b039433b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hqcpx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-slrbh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:59Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.342701 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.342757 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.342775 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.342800 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.342817 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:59Z","lastTransitionTime":"2026-01-31T16:29:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.346848 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:59Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.446189 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.446252 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.446273 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.446299 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.446320 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:59Z","lastTransitionTime":"2026-01-31T16:29:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin 
returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.549547 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.549593 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.549604 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.549622 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.549635 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:59Z","lastTransitionTime":"2026-01-31T16:29:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.652620 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.652686 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.652705 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.652730 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.652748 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:59Z","lastTransitionTime":"2026-01-31T16:29:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.672611 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.674418 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-03 17:49:24.935276569 +0000 UTC Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.689584 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.701397 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f9e971a-93ce-4a49-a970-a2789486d12c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f591a190eaf4b09cdb5961545cba8b3e469d2dd5b6b729627af238d6d8ebb410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31
T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-
release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIP
s\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rftqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:59Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.740398 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:29:59 crc kubenswrapper[4769]: E0131 16:29:59.740644 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.748907 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d352f75-43f7-4b8c-867e-cfb17bbbe011\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e57f0adc59ebcc50644648ee64395f6834328fec384b3d8c83b9758314ea18d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48035545fd929a672be1a83a941b13f4b352bdb858af6a412efec46dc7ac217\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageI
D\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-4bqbm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:59Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.755527 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.755567 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.755578 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.755597 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.755609 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:59Z","lastTransitionTime":"2026-01-31T16:29:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.779667 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86f2019b-d6ca-4e73-9dac-52fe746489cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0676e321ded7f968141e8b28596ec889f690e6b66a6b10151501f9f83a7372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e7b26b263ec2549303e59d543bdf302fea206a2ef416b96a1a56218edc92774\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-31T16:29:45Z\\\",\\\"message\\\":\\\"ift-operator-lifecycle-manager/catalog-operator-metrics template LB for network=default: []services.LB{}\\\\nI0131 16:29:45.104699 6213 services_controller.go:454] Service openshift-operator-lifecycle-manager/catalog-operator-metrics for network=default has 1 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nI0131 16:29:45.104688 6213 services_controller.go:451] Built service openshift-marketplace/redhat-operators cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-marketplace/redhat-operators_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-marketplace/redhat-operators\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.138\\\\\\\", Port:50051, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF0131 16:29:45.104744 6213 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: failed to add event handler: handler {0x1e60340 
0x1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:44Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e0676e321ded7f968141e8b28596ec889f690e6b66a6b10151501f9f83a7372\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-31T16:29:58Z\\\",\\\"message\\\":\\\"try object setup: *v1.Pod openshift-multus/multus-additional-cni-plugins-rftqz\\\\nI0131 16:29:58.731451 6426 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-node-2r9tc in node crc\\\\nI0131 16:29:58.731460 6426 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/multus-additional-cni-plugins-rftqz\\\\nI0131 16:29:58.731476 6426 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-additional-cni-plugins-rftqz in node crc\\\\nI0131 16:29:58.731468 6426 obj_retry.go:386] Retry successful for *v1.Pod openshift-ovn-kubernetes/ovnkube-node-2r9tc after 0 failed attempt(s)\\\\nI0131 16:29:58.731486 6426 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-additional-cni-plugins-rftqz after 0 failed attempt(s)\\\\nI0131 16:29:58.731490 6426 default_network_controller.go:776] Recording success event on pod openshift-ovn-kubernetes/ovnkube-node-2r9tc\\\\nF0131 16:29:58.731517 6426 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: 
fail\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d
2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2r9tc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:59Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.798027 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bl9cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"428b0729-22d7-4feb-a392-1ec77e5acbc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzcbr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzcbr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:47Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bl9cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:59Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.817557 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:59Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.838647 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.842090 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79dca88fae791047a9091e1fcad57d33d0061bbb52395ed325e2658e5fd6ffc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:59Z is 
after 2025-08-24T17:21:41Z" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.857995 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.858060 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.858078 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.858102 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.858118 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:59Z","lastTransitionTime":"2026-01-31T16:29:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.861947 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:59Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.879932 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef20199c8b7874b9d34bc38aed75d3d0dc669db3dfaff3218a1a4e249330daca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:59Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.896872 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lw4fx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"97c97bcd-bd44-4fdf-a90a-2d6be88c23e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f603aa4ef9fa0a10f90946e243cd0d25dcd6dba58c86a4378085addf26d95233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6q9l7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lw4fx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:59Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.920911 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"21fa2db2-f448-487d-9ddb-ba4da28e8ffa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0131 16:29:26.255718 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0131 16:29:26.257930 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3522701725/tls.crt::/tmp/serving-cert-3522701725/tls.key\\\\\\\"\\\\nI0131 16:29:32.222719 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0131 16:29:32.227365 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0131 16:29:32.227391 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0131 16:29:32.227414 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0131 16:29:32.227419 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0131 16:29:32.234321 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0131 16:29:32.234340 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234349 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0131 16:29:32.234352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0131 16:29:32.234355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0131 16:29:32.234357 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0131 16:29:32.234523 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0131 16:29:32.237094 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:59Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.942643 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:59Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.961874 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.961936 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.961956 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.961986 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.962002 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:29:59Z","lastTransitionTime":"2026-01-31T16:29:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.963529 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e5e34357e35a0bda20351c442465d7866c927dfad1a6c8acf1dc32b52a326ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb37e108557965f63da49a443c6667212f8ab8eb0a7099a0a106c45e00e78e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:59Z is after 2025-08-24T17:21:41Z" Jan 31 16:29:59 crc kubenswrapper[4769]: I0131 16:29:59.980557 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-slrbh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"48d46c05-78b8-4355-9027-77efbbfbe87c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c68712921d6f140f1028a42cadcda30d38e3a9772ad3ca53e6d7b3b039433b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hqcpx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-slrbh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:59Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.001004 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb8dd1c1-8ad0-4df1-9eb7-f7e36509abac\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8f8d35bedc1d437cf45b14a416b9e5ed1610d7ecd30603f9a179406839220a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9c6bf8a15857982b68a045b8ca1d407a63ad3da192375596f26389d9983cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1712a54b482696918a3ddb7294c16ce3676cb56c2928b721be12bcd390085e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f22b718ef3d2b16061b2c0fe48c8fe612b3e2b63baa2925fce7a1ad9552b090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:29:59Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.021844 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g5kbw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a7cfe09-9892-494d-a420-5d720afb3df3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fb743faaf9717b716d96f7b36096ee11da25ac3eeeb601bdc6d8f20faf3a3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run
/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m86wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g5kbw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:00Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.040069 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kvc58" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e39e875a-bea7-4e27-af9a-f769a493efe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c9c0d071a489b7537159a33cfb524a36c03fb2b7dbb0bff1c4d966c26aa4a503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6shzp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a326f3c1d78ff42444724a0595281be656bfe3f92ff5716a089e7a6c6828d34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6shzp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kvc58\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:00Z is after 2025-08-24T17:21:41Z" Jan 31 
16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.056698 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2r9tc_86f2019b-d6ca-4e73-9dac-52fe746489cb/ovnkube-controller/2.log" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.062434 4769 scope.go:117] "RemoveContainer" containerID="4e0676e321ded7f968141e8b28596ec889f690e6b66a6b10151501f9f83a7372" Jan 31 16:30:00 crc kubenswrapper[4769]: E0131 16:30:00.062942 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-2r9tc_openshift-ovn-kubernetes(86f2019b-d6ca-4e73-9dac-52fe746489cb)\"" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.065653 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.065740 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.065773 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.065797 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.065822 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:00Z","lastTransitionTime":"2026-01-31T16:30:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.085744 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:00Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.105232 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e5e34357e35a0bda20351c442465d7866c927dfad1a6c8acf1dc32b52a326ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb37e108557965f63da49a443c6667212f8ab8eb0a7099a0a106c45e00e78e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:00Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.121051 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-slrbh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"48d46c05-78b8-4355-9027-77efbbfbe87c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c68712921d6f140f1028a42cadcda30d38e3a9772ad3ca53e6d7b3b039433b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hqcpx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-slrbh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:00Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.147288 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb8dd1c1-8ad0-4df1-9eb7-f7e36509abac\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8f8d35bedc1d437cf45b14a416b9e5ed1610d7ecd30603f9a179406839220a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9c6bf8a15857982b68a045b8ca1d407a63ad3da192375596f26389d9983cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1712a54b482696918a3ddb7294c16ce3676cb56c2928b721be12bcd390085e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f22b718ef3d2b16061b2c0fe48c8fe612b3e2b63baa2925fce7a1ad9552b090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:00Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.169813 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.169874 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.169888 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.169911 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.169926 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:00Z","lastTransitionTime":"2026-01-31T16:30:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.197313 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g5kbw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a7cfe09-9892-494d-a420-5d720afb3df3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fb743faaf9717b716d96f7b36096ee11da25ac3eeeb601bdc6d8f20faf3a3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m86wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g5kbw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:00Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.211288 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kvc58" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e39e875a-bea7-4e27-af9a-f769a493efe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c9c0d071a489b7537159a33cfb524a36c03fb2b7dbb0bff1c4d966c26aa4a503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6shzp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a326f3c1d78ff42444724a0595281be656bfe3f92ff5716a089e7a6c6828d34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6shzp\\\",\\\"readOnly\\\":true,\\\"recursiveReadO
nly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kvc58\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:00Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.226789 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:00Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.242275 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f9e971a-93ce-4a49-a970-a2789486d12c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f591a190eaf4b09cdb5961545cba8b3e469d2dd5b6b729627af238d6d8ebb410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rftqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:00Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.254791 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d352f75-43f7-4b8c-867e-cfb17bbbe011\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e57f0adc59ebcc50644648ee64395f6834328fec384b3d8c83b9758314ea18d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48035545fd929a672be1a83a941b13f4b352bdb858af6a412efec46dc7ac217\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-4bqbm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:00Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.271814 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.272022 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.272095 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.272162 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.272224 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:00Z","lastTransitionTime":"2026-01-31T16:30:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.276415 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86f2019b-d6ca-4e73-9dac-52fe746489cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0676e321ded7f968141e8b28596ec889f690e6b66a6b10151501f9f83a7372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e0676e321ded7f968141e8b28596ec889f690e6b66a6b10151501f9f83a7372\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-31T16:29:58Z\\\",\\\"message\\\":\\\"try object setup: *v1.Pod openshift-multus/multus-additional-cni-plugins-rftqz\\\\nI0131 16:29:58.731451 6426 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-node-2r9tc in node crc\\\\nI0131 16:29:58.731460 6426 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/multus-additional-cni-plugins-rftqz\\\\nI0131 16:29:58.731476 6426 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-additional-cni-plugins-rftqz in node crc\\\\nI0131 16:29:58.731468 6426 obj_retry.go:386] Retry successful for *v1.Pod openshift-ovn-kubernetes/ovnkube-node-2r9tc after 0 failed attempt(s)\\\\nI0131 16:29:58.731486 6426 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-additional-cni-plugins-rftqz after 0 failed attempt(s)\\\\nI0131 16:29:58.731490 6426 default_network_controller.go:776] Recording success event on pod openshift-ovn-kubernetes/ovnkube-node-2r9tc\\\\nF0131 16:29:58.731517 6426 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: fail\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:57Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-2r9tc_openshift-ovn-kubernetes(86f2019b-d6ca-4e73-9dac-52fe746489cb)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2r9tc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:00Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.288215 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bl9cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"428b0729-22d7-4feb-a392-1ec77e5acbc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzcbr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzcbr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:47Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bl9cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:00Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.298257 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lw4fx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"97c97bcd-bd44-4fdf-a90a-2d6be88c23e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f603aa4ef9fa0a10f90946e243cd0d25dcd6dba58c86a4378085addf26d95233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6q9l7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lw4fx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:00Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.312272 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"21fa2db2-f448-487d-9ddb-ba4da28e8ffa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0131 16:29:26.255718 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0131 16:29:26.257930 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3522701725/tls.crt::/tmp/serving-cert-3522701725/tls.key\\\\\\\"\\\\nI0131 16:29:32.222719 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0131 16:29:32.227365 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0131 16:29:32.227391 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0131 16:29:32.227414 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0131 16:29:32.227419 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0131 16:29:32.234321 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0131 16:29:32.234340 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234349 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0131 16:29:32.234352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0131 16:29:32.234355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0131 16:29:32.234357 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0131 16:29:32.234523 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0131 16:29:32.237094 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:00Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.322892 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"259af7b3-780a-4464-9596-a063fecea409\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc4c0f2131eeb3ea03f5572e41e6c9f86ef15f5d6b70880be88ae6219eadecab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7027bbe350b6afd5a1563f64215b882ff809f1474dcce7a15a843756b7595233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a05b68fb33a04a94268458bbaa7f891e2d493dbe5a27d7e12ced0d561a70e938\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7cff45082b65da86bcca88d363692fdcaffbae44f33b5ae53a09b5aea7eff212\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7cff45082b65da86bcca88d363692fdcaffbae44f33b5ae53a09b5aea7eff212\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:00Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.335149 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79dca88fae791047a9091e1fcad57d33d0061bbb52395ed325e2658e5fd6ffc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:00Z is after 
2025-08-24T17:21:41Z" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.346524 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:00Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.358583 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef20199c8b7874b9d34bc38aed75d3d0dc669db3dfaff3218a1a4e249330daca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:00Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.374353 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.374387 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.374399 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.374416 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.374428 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:00Z","lastTransitionTime":"2026-01-31T16:30:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.477739 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.477782 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.477796 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.477817 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.477830 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:00Z","lastTransitionTime":"2026-01-31T16:30:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.580975 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.581033 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.581045 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.581067 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.581079 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:00Z","lastTransitionTime":"2026-01-31T16:30:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.675585 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-03 14:59:22.73413846 +0000 UTC Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.684211 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.684277 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.684303 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.684333 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.684358 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:00Z","lastTransitionTime":"2026-01-31T16:30:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.707171 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.707255 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.707278 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:30:00 crc kubenswrapper[4769]: E0131 16:30:00.707370 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:30:00 crc kubenswrapper[4769]: E0131 16:30:00.707478 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:30:00 crc kubenswrapper[4769]: E0131 16:30:00.707657 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.786959 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.786999 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.787009 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.787026 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.787036 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:00Z","lastTransitionTime":"2026-01-31T16:30:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.890682 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.890727 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.890743 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.890766 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.890782 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:00Z","lastTransitionTime":"2026-01-31T16:30:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.993389 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.993445 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.993520 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.993548 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:00 crc kubenswrapper[4769]: I0131 16:30:00.993569 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:00Z","lastTransitionTime":"2026-01-31T16:30:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:01 crc kubenswrapper[4769]: I0131 16:30:01.096785 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:01 crc kubenswrapper[4769]: I0131 16:30:01.096860 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:01 crc kubenswrapper[4769]: I0131 16:30:01.096882 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:01 crc kubenswrapper[4769]: I0131 16:30:01.096908 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:01 crc kubenswrapper[4769]: I0131 16:30:01.096927 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:01Z","lastTransitionTime":"2026-01-31T16:30:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:01 crc kubenswrapper[4769]: I0131 16:30:01.199932 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:01 crc kubenswrapper[4769]: I0131 16:30:01.199991 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:01 crc kubenswrapper[4769]: I0131 16:30:01.200014 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:01 crc kubenswrapper[4769]: I0131 16:30:01.200043 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:01 crc kubenswrapper[4769]: I0131 16:30:01.200066 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:01Z","lastTransitionTime":"2026-01-31T16:30:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:01 crc kubenswrapper[4769]: I0131 16:30:01.307997 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:01 crc kubenswrapper[4769]: I0131 16:30:01.308074 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:01 crc kubenswrapper[4769]: I0131 16:30:01.308106 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:01 crc kubenswrapper[4769]: I0131 16:30:01.308133 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:01 crc kubenswrapper[4769]: I0131 16:30:01.308150 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:01Z","lastTransitionTime":"2026-01-31T16:30:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:01 crc kubenswrapper[4769]: I0131 16:30:01.410833 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:01 crc kubenswrapper[4769]: I0131 16:30:01.411274 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:01 crc kubenswrapper[4769]: I0131 16:30:01.411432 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:01 crc kubenswrapper[4769]: I0131 16:30:01.411630 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:01 crc kubenswrapper[4769]: I0131 16:30:01.411790 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:01Z","lastTransitionTime":"2026-01-31T16:30:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:01 crc kubenswrapper[4769]: I0131 16:30:01.514403 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:01 crc kubenswrapper[4769]: I0131 16:30:01.514465 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:01 crc kubenswrapper[4769]: I0131 16:30:01.514482 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:01 crc kubenswrapper[4769]: I0131 16:30:01.514535 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:01 crc kubenswrapper[4769]: I0131 16:30:01.514553 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:01Z","lastTransitionTime":"2026-01-31T16:30:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:01 crc kubenswrapper[4769]: I0131 16:30:01.617443 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:01 crc kubenswrapper[4769]: I0131 16:30:01.617576 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:01 crc kubenswrapper[4769]: I0131 16:30:01.617587 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:01 crc kubenswrapper[4769]: I0131 16:30:01.617600 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:01 crc kubenswrapper[4769]: I0131 16:30:01.617609 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:01Z","lastTransitionTime":"2026-01-31T16:30:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:01 crc kubenswrapper[4769]: I0131 16:30:01.675973 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-20 12:42:38.943588938 +0000 UTC Jan 31 16:30:01 crc kubenswrapper[4769]: I0131 16:30:01.707254 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:30:01 crc kubenswrapper[4769]: E0131 16:30:01.707428 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:30:01 crc kubenswrapper[4769]: I0131 16:30:01.720177 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:01 crc kubenswrapper[4769]: I0131 16:30:01.720264 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:01 crc kubenswrapper[4769]: I0131 16:30:01.720283 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:01 crc kubenswrapper[4769]: I0131 16:30:01.720336 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:01 crc kubenswrapper[4769]: I0131 16:30:01.720357 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:01Z","lastTransitionTime":"2026-01-31T16:30:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:01 crc kubenswrapper[4769]: I0131 16:30:01.823268 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:01 crc kubenswrapper[4769]: I0131 16:30:01.823330 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:01 crc kubenswrapper[4769]: I0131 16:30:01.823351 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:01 crc kubenswrapper[4769]: I0131 16:30:01.823376 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:01 crc kubenswrapper[4769]: I0131 16:30:01.823393 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:01Z","lastTransitionTime":"2026-01-31T16:30:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:01 crc kubenswrapper[4769]: I0131 16:30:01.926584 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:01 crc kubenswrapper[4769]: I0131 16:30:01.926647 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:01 crc kubenswrapper[4769]: I0131 16:30:01.926667 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:01 crc kubenswrapper[4769]: I0131 16:30:01.926695 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:01 crc kubenswrapper[4769]: I0131 16:30:01.926717 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:01Z","lastTransitionTime":"2026-01-31T16:30:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.030151 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.030247 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.030268 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.030325 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.030346 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:02Z","lastTransitionTime":"2026-01-31T16:30:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.132723 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.132751 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.132759 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.132771 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.132782 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:02Z","lastTransitionTime":"2026-01-31T16:30:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.235424 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.235453 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.235463 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.235479 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.235491 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:02Z","lastTransitionTime":"2026-01-31T16:30:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.337803 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.337868 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.337881 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.337897 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.337912 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:02Z","lastTransitionTime":"2026-01-31T16:30:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.440778 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.440842 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.440860 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.440888 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.440908 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:02Z","lastTransitionTime":"2026-01-31T16:30:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.543896 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.544621 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.544797 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.544942 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.545108 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:02Z","lastTransitionTime":"2026-01-31T16:30:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.648840 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.648901 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.648931 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.648961 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.648978 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:02Z","lastTransitionTime":"2026-01-31T16:30:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.676344 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-08 09:18:55.022833233 +0000 UTC Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.707301 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:30:02 crc kubenswrapper[4769]: E0131 16:30:02.707685 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.707429 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:30:02 crc kubenswrapper[4769]: E0131 16:30:02.708107 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.707356 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:30:02 crc kubenswrapper[4769]: E0131 16:30:02.708481 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.728829 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef20199c8b7874b9d34bc38aed75d3d0dc669db3dfaff3218a1a4e249330daca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:02Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.745455 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lw4fx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"97c97bcd-bd44-4fdf-a90a-2d6be88c23e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f603aa4ef9fa0a10f90946e243cd0d25dcd6dba58c86a4378085addf26d95233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6q9l7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lw4fx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:02Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.751696 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.751753 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.751773 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.751801 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.751826 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:02Z","lastTransitionTime":"2026-01-31T16:30:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.768867 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21fa2db2-f448-487d-9ddb-ba4da28e8ffa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0131 16:29:26.255718 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0131 16:29:26.257930 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3522701725/tls.crt::/tmp/serving-cert-3522701725/tls.key\\\\\\\"\\\\nI0131 16:29:32.222719 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0131 16:29:32.227365 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0131 16:29:32.227391 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0131 16:29:32.227414 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0131 16:29:32.227419 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0131 16:29:32.234321 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0131 16:29:32.234340 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234349 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0131 16:29:32.234352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0131 16:29:32.234355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0131 16:29:32.234357 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0131 16:29:32.234523 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0131 16:29:32.237094 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:02Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.788730 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"259af7b3-780a-4464-9596-a063fecea409\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc4c0f2131eeb3ea03f5572e41e6c9f86ef15f5d6b70880be88ae6219eadecab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7027bbe350b6afd5a1563f64215b882ff809f1474dcce7a15a843756b7595233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a05b68fb33a04a94268458bbaa7f891e2d493dbe5a27d7e12ced0d561a70e938\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7cff45082b65da86bcca88d363692fdcaffbae44f33b5ae53a09b5aea7eff212\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7cff45082b65da86bcca88d363692fdcaffbae44f33b5ae53a09b5aea7eff212\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:02Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.815418 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79dca88fae791047a9091e1fcad57d33d0061bbb52395ed325e2658e5fd6ffc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:02Z is after 
2025-08-24T17:21:41Z" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.840641 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:02Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.854741 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.855594 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.855621 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.855648 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.855665 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:02Z","lastTransitionTime":"2026-01-31T16:30:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in 
/etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.859881 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:02Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.877975 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e5e34357e35a0bda20351c442465d7866c927dfad1a6c8acf1dc32b52a326ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb37e108557965f63da49a443c6667212f8ab8eb0a7099a0a106c45e00e78e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:02Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.893614 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-slrbh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"48d46c05-78b8-4355-9027-77efbbfbe87c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c68712921d6f140f1028a42cadcda30d38e3a9772ad3ca53e6d7b3b039433b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hqcpx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-slrbh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:02Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.912406 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb8dd1c1-8ad0-4df1-9eb7-f7e36509abac\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8f8d35bedc1d437cf45b14a416b9e5ed1610d7ecd30603f9a179406839220a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9c6bf8a15857982b68a045b8ca1d407a63ad3da192375596f26389d9983cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1712a54b482696918a3ddb7294c16ce3676cb56c2928b721be12bcd390085e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f22b718ef3d2b16061b2c0fe48c8fe612b3e2b63baa2925fce7a1ad9552b090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:02Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.931311 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g5kbw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a7cfe09-9892-494d-a420-5d720afb3df3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fb743faaf9717b716d96f7b36096ee11da25ac3eeeb601bdc6d8f20faf3a3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run
/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m86wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g5kbw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:02Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.947337 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kvc58" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e39e875a-bea7-4e27-af9a-f769a493efe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c9c0d071a489b7537159a33cfb524a36c03fb2b7dbb0bff1c4d966c26aa4a503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6shzp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a326f3c1d78ff42444724a0595281be656bfe3f92ff5716a089e7a6c6828d34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6shzp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kvc58\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:02Z is after 2025-08-24T17:21:41Z" Jan 31 
16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.958167 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.958211 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.958228 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.958253 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.958270 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:02Z","lastTransitionTime":"2026-01-31T16:30:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.963824 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bl9cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"428b0729-22d7-4feb-a392-1ec77e5acbc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzcbr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzcbr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:47Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bl9cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:02Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.978922 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:02Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:02 crc kubenswrapper[4769]: I0131 16:30:02.999076 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f9e971a-93ce-4a49-a970-a2789486d12c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f591a190eaf4b09cdb5961545cba8b3e469d2dd5b6b729627af238d6d8ebb410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rftqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:02Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.018482 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d352f75-43f7-4b8c-867e-cfb17bbbe011\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e57f0adc59ebcc50644648ee64395f6834328fec384b3d8c83b9758314ea18d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48035545fd929a672be1a83a941b13f4b352bdb858af6a412efec46dc7ac217\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-4bqbm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:03Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.051027 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86f2019b-d6ca-4e73-9dac-52fe746489cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0676e321ded7f968141e8b28596ec889f690e6
b66a6b10151501f9f83a7372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e0676e321ded7f968141e8b28596ec889f690e6b66a6b10151501f9f83a7372\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-31T16:29:58Z\\\",\\\"message\\\":\\\"try object setup: *v1.Pod openshift-multus/multus-additional-cni-plugins-rftqz\\\\nI0131 16:29:58.731451 6426 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-node-2r9tc in node crc\\\\nI0131 16:29:58.731460 6426 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/multus-additional-cni-plugins-rftqz\\\\nI0131 16:29:58.731476 6426 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-additional-cni-plugins-rftqz in node crc\\\\nI0131 16:29:58.731468 6426 obj_retry.go:386] Retry successful for *v1.Pod openshift-ovn-kubernetes/ovnkube-node-2r9tc after 0 failed attempt(s)\\\\nI0131 16:29:58.731486 6426 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-additional-cni-plugins-rftqz after 0 failed attempt(s)\\\\nI0131 16:29:58.731490 6426 default_network_controller.go:776] Recording success event on pod openshift-ovn-kubernetes/ovnkube-node-2r9tc\\\\nF0131 16:29:58.731517 6426 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: fail\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:57Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-2r9tc_openshift-ovn-kubernetes(86f2019b-d6ca-4e73-9dac-52fe746489cb)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2r9tc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:03Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.061050 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.061148 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.061175 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.061212 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.061238 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:03Z","lastTransitionTime":"2026-01-31T16:30:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.164847 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.164915 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.164931 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.164957 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.164974 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:03Z","lastTransitionTime":"2026-01-31T16:30:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.268844 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.269178 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.269313 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.269447 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.269619 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:03Z","lastTransitionTime":"2026-01-31T16:30:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.373148 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.373215 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.373233 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.373266 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.373291 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:03Z","lastTransitionTime":"2026-01-31T16:30:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.414193 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/428b0729-22d7-4feb-a392-1ec77e5acbc0-metrics-certs\") pod \"network-metrics-daemon-bl9cd\" (UID: \"428b0729-22d7-4feb-a392-1ec77e5acbc0\") " pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:30:03 crc kubenswrapper[4769]: E0131 16:30:03.414487 4769 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 31 16:30:03 crc kubenswrapper[4769]: E0131 16:30:03.414675 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/428b0729-22d7-4feb-a392-1ec77e5acbc0-metrics-certs podName:428b0729-22d7-4feb-a392-1ec77e5acbc0 nodeName:}" failed. No retries permitted until 2026-01-31 16:30:19.414644155 +0000 UTC m=+67.488812864 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/428b0729-22d7-4feb-a392-1ec77e5acbc0-metrics-certs") pod "network-metrics-daemon-bl9cd" (UID: "428b0729-22d7-4feb-a392-1ec77e5acbc0") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.477421 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.477490 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.477572 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.477598 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.477619 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:03Z","lastTransitionTime":"2026-01-31T16:30:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.581016 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.581072 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.581090 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.581115 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.581132 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:03Z","lastTransitionTime":"2026-01-31T16:30:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.676543 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-23 10:35:50.269917297 +0000 UTC Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.684232 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.684422 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.684645 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.684942 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.685162 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:03Z","lastTransitionTime":"2026-01-31T16:30:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.707801 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:30:03 crc kubenswrapper[4769]: E0131 16:30:03.707989 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.788000 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.788068 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.788092 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.788122 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.788145 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:03Z","lastTransitionTime":"2026-01-31T16:30:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.891311 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.891363 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.891376 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.891395 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.891409 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:03Z","lastTransitionTime":"2026-01-31T16:30:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.993628 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.993676 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.993690 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.993709 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:03 crc kubenswrapper[4769]: I0131 16:30:03.993724 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:03Z","lastTransitionTime":"2026-01-31T16:30:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.096352 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.096415 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.096426 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.096441 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.096453 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:04Z","lastTransitionTime":"2026-01-31T16:30:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.202332 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.202729 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.202881 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.203039 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.203177 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:04Z","lastTransitionTime":"2026-01-31T16:30:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.306470 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.306698 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.306725 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.306750 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.306769 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:04Z","lastTransitionTime":"2026-01-31T16:30:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.410836 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.410932 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.410953 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.410976 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.410996 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:04Z","lastTransitionTime":"2026-01-31T16:30:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.514027 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.514080 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.514098 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.514123 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.514140 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:04Z","lastTransitionTime":"2026-01-31T16:30:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.524224 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:30:04 crc kubenswrapper[4769]: E0131 16:30:04.524376 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:30:36.52435172 +0000 UTC m=+84.598520419 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.524651 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:30:04 crc kubenswrapper[4769]: E0131 16:30:04.524805 4769 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 31 16:30:04 crc kubenswrapper[4769]: E0131 16:30:04.525323 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-31 16:30:36.525302917 +0000 UTC m=+84.599471626 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.525433 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:30:04 crc kubenswrapper[4769]: E0131 16:30:04.525603 4769 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 31 16:30:04 crc kubenswrapper[4769]: E0131 16:30:04.525672 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-31 16:30:36.525658396 +0000 UTC m=+84.599827105 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.617948 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.618019 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.618044 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.618073 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.618096 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:04Z","lastTransitionTime":"2026-01-31T16:30:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.627306 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.627396 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:30:04 crc kubenswrapper[4769]: E0131 16:30:04.627618 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 31 16:30:04 crc kubenswrapper[4769]: E0131 16:30:04.627654 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 31 16:30:04 crc kubenswrapper[4769]: E0131 16:30:04.627699 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 31 16:30:04 crc kubenswrapper[4769]: E0131 16:30:04.627719 4769 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 31 16:30:04 crc kubenswrapper[4769]: E0131 16:30:04.627779 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-31 16:30:36.627758488 +0000 UTC m=+84.701927197 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 31 16:30:04 crc kubenswrapper[4769]: E0131 16:30:04.627658 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 31 16:30:04 crc kubenswrapper[4769]: E0131 16:30:04.627832 4769 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 31 16:30:04 crc kubenswrapper[4769]: E0131 16:30:04.627920 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-31 16:30:36.627900312 +0000 UTC m=+84.702069011 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.677592 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-02 19:33:59.989409446 +0000 UTC Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.708135 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.708215 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:30:04 crc kubenswrapper[4769]: E0131 16:30:04.708374 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.708405 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:30:04 crc kubenswrapper[4769]: E0131 16:30:04.708603 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:30:04 crc kubenswrapper[4769]: E0131 16:30:04.708854 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.720797 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.720922 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.720945 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.720967 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.720988 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:04Z","lastTransitionTime":"2026-01-31T16:30:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.824973 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.825032 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.825055 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.825083 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.825106 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:04Z","lastTransitionTime":"2026-01-31T16:30:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.928321 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.928383 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.928399 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.928426 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:04 crc kubenswrapper[4769]: I0131 16:30:04.928442 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:04Z","lastTransitionTime":"2026-01-31T16:30:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.031103 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.031239 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.031268 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.031298 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.031320 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:05Z","lastTransitionTime":"2026-01-31T16:30:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.134121 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.134584 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.134604 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.134648 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.134666 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:05Z","lastTransitionTime":"2026-01-31T16:30:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.238847 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.238908 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.238925 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.238949 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.238965 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:05Z","lastTransitionTime":"2026-01-31T16:30:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.341590 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.341671 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.341696 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.341724 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.341743 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:05Z","lastTransitionTime":"2026-01-31T16:30:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.444689 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.444746 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.444762 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.444784 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.444799 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:05Z","lastTransitionTime":"2026-01-31T16:30:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.548321 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.548407 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.548427 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.548487 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.548544 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:05Z","lastTransitionTime":"2026-01-31T16:30:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.650143 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.650205 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.650225 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.650255 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.650273 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:05Z","lastTransitionTime":"2026-01-31T16:30:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.678632 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-13 12:39:54.526934708 +0000 UTC Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.707910 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:30:05 crc kubenswrapper[4769]: E0131 16:30:05.708026 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.753107 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.753166 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.753175 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.753190 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.753199 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:05Z","lastTransitionTime":"2026-01-31T16:30:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.856048 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.856073 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.856080 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.856094 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.856103 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:05Z","lastTransitionTime":"2026-01-31T16:30:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.957592 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.957665 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.957684 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.957705 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:05 crc kubenswrapper[4769]: I0131 16:30:05.957721 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:05Z","lastTransitionTime":"2026-01-31T16:30:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.060433 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.060529 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.060568 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.060599 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.060619 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:06Z","lastTransitionTime":"2026-01-31T16:30:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.163144 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.163217 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.163235 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.163262 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.163280 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:06Z","lastTransitionTime":"2026-01-31T16:30:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.266002 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.266067 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.266091 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.266119 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.266140 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:06Z","lastTransitionTime":"2026-01-31T16:30:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.369687 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.369757 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.369775 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.369798 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.369815 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:06Z","lastTransitionTime":"2026-01-31T16:30:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.472839 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.472909 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.472931 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.472963 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.472985 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:06Z","lastTransitionTime":"2026-01-31T16:30:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.576448 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.576564 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.576586 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.576612 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.576630 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:06Z","lastTransitionTime":"2026-01-31T16:30:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.678724 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-23 23:02:52.421603817 +0000 UTC Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.678753 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.678877 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.678904 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.678937 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.678959 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:06Z","lastTransitionTime":"2026-01-31T16:30:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.708153 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.708269 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.708532 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:30:06 crc kubenswrapper[4769]: E0131 16:30:06.708612 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:30:06 crc kubenswrapper[4769]: E0131 16:30:06.708904 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:30:06 crc kubenswrapper[4769]: E0131 16:30:06.709113 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.781876 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.781933 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.781955 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.781984 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.782004 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:06Z","lastTransitionTime":"2026-01-31T16:30:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.885165 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.885228 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.885251 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.885280 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.885304 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:06Z","lastTransitionTime":"2026-01-31T16:30:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.987648 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.987712 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.987735 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.987764 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:06 crc kubenswrapper[4769]: I0131 16:30:06.987785 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:06Z","lastTransitionTime":"2026-01-31T16:30:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:07 crc kubenswrapper[4769]: I0131 16:30:07.090549 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:07 crc kubenswrapper[4769]: I0131 16:30:07.090642 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:07 crc kubenswrapper[4769]: I0131 16:30:07.090656 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:07 crc kubenswrapper[4769]: I0131 16:30:07.090674 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:07 crc kubenswrapper[4769]: I0131 16:30:07.090687 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:07Z","lastTransitionTime":"2026-01-31T16:30:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:07 crc kubenswrapper[4769]: I0131 16:30:07.193586 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:07 crc kubenswrapper[4769]: I0131 16:30:07.193655 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:07 crc kubenswrapper[4769]: I0131 16:30:07.193680 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:07 crc kubenswrapper[4769]: I0131 16:30:07.193710 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:07 crc kubenswrapper[4769]: I0131 16:30:07.193733 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:07Z","lastTransitionTime":"2026-01-31T16:30:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:07 crc kubenswrapper[4769]: I0131 16:30:07.295970 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:07 crc kubenswrapper[4769]: I0131 16:30:07.296026 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:07 crc kubenswrapper[4769]: I0131 16:30:07.296048 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:07 crc kubenswrapper[4769]: I0131 16:30:07.296076 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:07 crc kubenswrapper[4769]: I0131 16:30:07.296097 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:07Z","lastTransitionTime":"2026-01-31T16:30:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:07 crc kubenswrapper[4769]: I0131 16:30:07.399203 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:07 crc kubenswrapper[4769]: I0131 16:30:07.399251 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:07 crc kubenswrapper[4769]: I0131 16:30:07.399267 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:07 crc kubenswrapper[4769]: I0131 16:30:07.399334 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:07 crc kubenswrapper[4769]: I0131 16:30:07.399349 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:07Z","lastTransitionTime":"2026-01-31T16:30:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:07 crc kubenswrapper[4769]: I0131 16:30:07.501305 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:07 crc kubenswrapper[4769]: I0131 16:30:07.501374 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:07 crc kubenswrapper[4769]: I0131 16:30:07.501396 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:07 crc kubenswrapper[4769]: I0131 16:30:07.501422 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:07 crc kubenswrapper[4769]: I0131 16:30:07.501443 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:07Z","lastTransitionTime":"2026-01-31T16:30:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:07 crc kubenswrapper[4769]: I0131 16:30:07.603888 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:07 crc kubenswrapper[4769]: I0131 16:30:07.603924 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:07 crc kubenswrapper[4769]: I0131 16:30:07.603934 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:07 crc kubenswrapper[4769]: I0131 16:30:07.603948 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:07 crc kubenswrapper[4769]: I0131 16:30:07.603958 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:07Z","lastTransitionTime":"2026-01-31T16:30:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:07 crc kubenswrapper[4769]: I0131 16:30:07.679260 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-26 00:36:07.553313554 +0000 UTC Jan 31 16:30:07 crc kubenswrapper[4769]: I0131 16:30:07.706211 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:07 crc kubenswrapper[4769]: I0131 16:30:07.706247 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:07 crc kubenswrapper[4769]: I0131 16:30:07.706258 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:07 crc kubenswrapper[4769]: I0131 16:30:07.706273 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:07 crc kubenswrapper[4769]: I0131 16:30:07.706283 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:07Z","lastTransitionTime":"2026-01-31T16:30:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:07 crc kubenswrapper[4769]: I0131 16:30:07.707083 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:30:07 crc kubenswrapper[4769]: E0131 16:30:07.707252 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:30:07 crc kubenswrapper[4769]: I0131 16:30:07.810269 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:07 crc kubenswrapper[4769]: I0131 16:30:07.810365 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:07 crc kubenswrapper[4769]: I0131 16:30:07.810381 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:07 crc kubenswrapper[4769]: I0131 16:30:07.810404 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:07 crc kubenswrapper[4769]: I0131 16:30:07.810453 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:07Z","lastTransitionTime":"2026-01-31T16:30:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:07 crc kubenswrapper[4769]: I0131 16:30:07.913895 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:07 crc kubenswrapper[4769]: I0131 16:30:07.913968 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:07 crc kubenswrapper[4769]: I0131 16:30:07.913996 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:07 crc kubenswrapper[4769]: I0131 16:30:07.914024 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:07 crc kubenswrapper[4769]: I0131 16:30:07.914041 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:07Z","lastTransitionTime":"2026-01-31T16:30:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.016802 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.016850 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.016861 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.016876 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.016886 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:08Z","lastTransitionTime":"2026-01-31T16:30:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.121104 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.121182 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.121195 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.121210 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.121240 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:08Z","lastTransitionTime":"2026-01-31T16:30:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.224288 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.224327 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.224337 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.224354 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.224365 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:08Z","lastTransitionTime":"2026-01-31T16:30:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.327449 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.327927 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.328102 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.328251 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.328391 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:08Z","lastTransitionTime":"2026-01-31T16:30:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.431559 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.431617 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.431637 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.431661 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.431683 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:08Z","lastTransitionTime":"2026-01-31T16:30:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.533236 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.533289 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.533544 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.536702 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.536724 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:08Z","lastTransitionTime":"2026-01-31T16:30:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.639547 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.639615 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.639639 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.639669 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.639689 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:08Z","lastTransitionTime":"2026-01-31T16:30:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.679878 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-07 21:41:38.78749985 +0000 UTC Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.707261 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.707375 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:30:08 crc kubenswrapper[4769]: E0131 16:30:08.707449 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.707284 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:30:08 crc kubenswrapper[4769]: E0131 16:30:08.707557 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:30:08 crc kubenswrapper[4769]: E0131 16:30:08.707717 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.742705 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.742782 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.742801 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.742827 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.742847 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:08Z","lastTransitionTime":"2026-01-31T16:30:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.845135 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.845379 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.845396 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.845418 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.845434 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:08Z","lastTransitionTime":"2026-01-31T16:30:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.947468 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.947526 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.947537 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.947557 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:08 crc kubenswrapper[4769]: I0131 16:30:08.947569 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:08Z","lastTransitionTime":"2026-01-31T16:30:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.050125 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.050470 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.050602 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.050701 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.050788 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:09Z","lastTransitionTime":"2026-01-31T16:30:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.153537 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.153927 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.154126 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.154327 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.154551 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:09Z","lastTransitionTime":"2026-01-31T16:30:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.257609 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.257876 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.257944 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.258005 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.258066 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:09Z","lastTransitionTime":"2026-01-31T16:30:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.360352 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.360382 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.360393 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.360406 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.360415 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:09Z","lastTransitionTime":"2026-01-31T16:30:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.462429 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.462521 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.462539 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.462556 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.462571 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:09Z","lastTransitionTime":"2026-01-31T16:30:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.491364 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.491417 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.491434 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.491456 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.491472 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:09Z","lastTransitionTime":"2026-01-31T16:30:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:09 crc kubenswrapper[4769]: E0131 16:30:09.505296 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a4704f7-ede0-4833-ba79-415de5d798cc\\\",\\\"systemUUID\\\":\\\"e3275d1e-5ae6-4e54-b0fa-71e35cbe4ac0\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:09Z is after 
2025-08-24T17:21:41Z" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.509444 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.509530 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.509551 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.509575 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.509591 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:09Z","lastTransitionTime":"2026-01-31T16:30:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:09 crc kubenswrapper[4769]: E0131 16:30:09.570766 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a4704f7-ede0-4833-ba79-415de5d798cc\\\",\\\"systemUUID\\\":\\\"e3275d1e-5ae6-4e54-b0fa-71e35cbe4ac0\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:09Z is after 
2025-08-24T17:21:41Z" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.574807 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.574974 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.575051 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.575115 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.575197 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:09Z","lastTransitionTime":"2026-01-31T16:30:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:09 crc kubenswrapper[4769]: E0131 16:30:09.588328 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a4704f7-ede0-4833-ba79-415de5d798cc\\\",\\\"systemUUID\\\":\\\"e3275d1e-5ae6-4e54-b0fa-71e35cbe4ac0\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:09Z is after 
2025-08-24T17:21:41Z" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.592125 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.592182 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.592190 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.592204 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.592213 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:09Z","lastTransitionTime":"2026-01-31T16:30:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:09 crc kubenswrapper[4769]: E0131 16:30:09.605661 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a4704f7-ede0-4833-ba79-415de5d798cc\\\",\\\"systemUUID\\\":\\\"e3275d1e-5ae6-4e54-b0fa-71e35cbe4ac0\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:09Z is after 
2025-08-24T17:21:41Z" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.608667 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.608691 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.608699 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.608713 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.608722 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:09Z","lastTransitionTime":"2026-01-31T16:30:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:09 crc kubenswrapper[4769]: E0131 16:30:09.619802 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a4704f7-ede0-4833-ba79-415de5d798cc\\\",\\\"systemUUID\\\":\\\"e3275d1e-5ae6-4e54-b0fa-71e35cbe4ac0\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:09Z is after 
2025-08-24T17:21:41Z" Jan 31 16:30:09 crc kubenswrapper[4769]: E0131 16:30:09.620020 4769 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.621196 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.621218 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.621226 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.621239 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.621248 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:09Z","lastTransitionTime":"2026-01-31T16:30:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.680297 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-09 10:33:42.71514603 +0000 UTC Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.707386 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:30:09 crc kubenswrapper[4769]: E0131 16:30:09.707575 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.723782 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.723823 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.723837 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.723853 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.723864 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:09Z","lastTransitionTime":"2026-01-31T16:30:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.826031 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.826068 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.826079 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.826095 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.826108 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:09Z","lastTransitionTime":"2026-01-31T16:30:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.928853 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.928965 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.928985 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.929019 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:09 crc kubenswrapper[4769]: I0131 16:30:09.929036 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:09Z","lastTransitionTime":"2026-01-31T16:30:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.032557 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.032620 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.032637 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.032662 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.032683 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:10Z","lastTransitionTime":"2026-01-31T16:30:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.136449 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.136547 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.136566 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.136591 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.136609 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:10Z","lastTransitionTime":"2026-01-31T16:30:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.239422 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.239475 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.239560 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.239588 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.239608 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:10Z","lastTransitionTime":"2026-01-31T16:30:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.342575 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.342649 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.342670 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.342693 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.342710 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:10Z","lastTransitionTime":"2026-01-31T16:30:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.446333 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.446401 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.446424 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.446458 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.446481 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:10Z","lastTransitionTime":"2026-01-31T16:30:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.549436 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.549550 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.549581 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.549612 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.549634 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:10Z","lastTransitionTime":"2026-01-31T16:30:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.652573 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.652637 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.652664 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.652696 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.652718 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:10Z","lastTransitionTime":"2026-01-31T16:30:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.681228 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-17 17:20:55.115297291 +0000 UTC Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.707150 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.707226 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.707307 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:30:10 crc kubenswrapper[4769]: E0131 16:30:10.708051 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:30:10 crc kubenswrapper[4769]: E0131 16:30:10.708177 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:30:10 crc kubenswrapper[4769]: E0131 16:30:10.708271 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.755344 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.755488 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.755551 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.755578 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.755595 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:10Z","lastTransitionTime":"2026-01-31T16:30:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.891727 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.891793 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.891809 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.891836 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.891852 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:10Z","lastTransitionTime":"2026-01-31T16:30:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.994455 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.995204 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.995387 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.995622 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:10 crc kubenswrapper[4769]: I0131 16:30:10.995842 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:10Z","lastTransitionTime":"2026-01-31T16:30:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:11 crc kubenswrapper[4769]: I0131 16:30:11.097999 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:11 crc kubenswrapper[4769]: I0131 16:30:11.098296 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:11 crc kubenswrapper[4769]: I0131 16:30:11.098404 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:11 crc kubenswrapper[4769]: I0131 16:30:11.098508 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:11 crc kubenswrapper[4769]: I0131 16:30:11.098612 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:11Z","lastTransitionTime":"2026-01-31T16:30:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:11 crc kubenswrapper[4769]: I0131 16:30:11.200873 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:11 crc kubenswrapper[4769]: I0131 16:30:11.200923 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:11 crc kubenswrapper[4769]: I0131 16:30:11.200946 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:11 crc kubenswrapper[4769]: I0131 16:30:11.200971 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:11 crc kubenswrapper[4769]: I0131 16:30:11.200992 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:11Z","lastTransitionTime":"2026-01-31T16:30:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:11 crc kubenswrapper[4769]: I0131 16:30:11.303832 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:11 crc kubenswrapper[4769]: I0131 16:30:11.303882 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:11 crc kubenswrapper[4769]: I0131 16:30:11.303901 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:11 crc kubenswrapper[4769]: I0131 16:30:11.303923 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:11 crc kubenswrapper[4769]: I0131 16:30:11.303940 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:11Z","lastTransitionTime":"2026-01-31T16:30:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:11 crc kubenswrapper[4769]: I0131 16:30:11.406883 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:11 crc kubenswrapper[4769]: I0131 16:30:11.407292 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:11 crc kubenswrapper[4769]: I0131 16:30:11.407453 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:11 crc kubenswrapper[4769]: I0131 16:30:11.407669 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:11 crc kubenswrapper[4769]: I0131 16:30:11.407839 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:11Z","lastTransitionTime":"2026-01-31T16:30:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:11 crc kubenswrapper[4769]: I0131 16:30:11.511079 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:11 crc kubenswrapper[4769]: I0131 16:30:11.511546 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:11 crc kubenswrapper[4769]: I0131 16:30:11.511740 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:11 crc kubenswrapper[4769]: I0131 16:30:11.511892 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:11 crc kubenswrapper[4769]: I0131 16:30:11.512106 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:11Z","lastTransitionTime":"2026-01-31T16:30:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:11 crc kubenswrapper[4769]: I0131 16:30:11.615478 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:11 crc kubenswrapper[4769]: I0131 16:30:11.615594 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:11 crc kubenswrapper[4769]: I0131 16:30:11.615613 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:11 crc kubenswrapper[4769]: I0131 16:30:11.615640 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:11 crc kubenswrapper[4769]: I0131 16:30:11.615656 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:11Z","lastTransitionTime":"2026-01-31T16:30:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:11 crc kubenswrapper[4769]: I0131 16:30:11.682455 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-07 15:44:03.6498925 +0000 UTC Jan 31 16:30:11 crc kubenswrapper[4769]: I0131 16:30:11.707640 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:30:11 crc kubenswrapper[4769]: E0131 16:30:11.707858 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:30:11 crc kubenswrapper[4769]: I0131 16:30:11.718683 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:11 crc kubenswrapper[4769]: I0131 16:30:11.718758 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:11 crc kubenswrapper[4769]: I0131 16:30:11.718777 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:11 crc kubenswrapper[4769]: I0131 16:30:11.718809 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:11 crc kubenswrapper[4769]: I0131 16:30:11.718829 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:11Z","lastTransitionTime":"2026-01-31T16:30:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:11 crc kubenswrapper[4769]: I0131 16:30:11.822681 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:11 crc kubenswrapper[4769]: I0131 16:30:11.822749 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:11 crc kubenswrapper[4769]: I0131 16:30:11.822768 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:11 crc kubenswrapper[4769]: I0131 16:30:11.822797 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:11 crc kubenswrapper[4769]: I0131 16:30:11.822818 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:11Z","lastTransitionTime":"2026-01-31T16:30:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:11 crc kubenswrapper[4769]: I0131 16:30:11.925825 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:11 crc kubenswrapper[4769]: I0131 16:30:11.925893 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:11 crc kubenswrapper[4769]: I0131 16:30:11.925916 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:11 crc kubenswrapper[4769]: I0131 16:30:11.925943 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:11 crc kubenswrapper[4769]: I0131 16:30:11.925961 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:11Z","lastTransitionTime":"2026-01-31T16:30:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.029763 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.030065 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.030157 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.030269 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.030366 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:12Z","lastTransitionTime":"2026-01-31T16:30:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.133268 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.133719 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.133875 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.134029 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.134227 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:12Z","lastTransitionTime":"2026-01-31T16:30:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.237252 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.237316 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.237340 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.237371 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.237393 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:12Z","lastTransitionTime":"2026-01-31T16:30:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.340464 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.340796 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.340888 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.340987 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.341075 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:12Z","lastTransitionTime":"2026-01-31T16:30:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.444572 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.444626 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.444646 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.444670 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.444688 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:12Z","lastTransitionTime":"2026-01-31T16:30:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.548851 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.548898 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.548907 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.548924 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.548933 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:12Z","lastTransitionTime":"2026-01-31T16:30:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.651207 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.651287 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.651307 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.651340 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.651361 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:12Z","lastTransitionTime":"2026-01-31T16:30:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.682991 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-01 08:09:59.802555586 +0000 UTC Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.707281 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.707286 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:30:12 crc kubenswrapper[4769]: E0131 16:30:12.707825 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.707303 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:30:12 crc kubenswrapper[4769]: E0131 16:30:12.707973 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:30:12 crc kubenswrapper[4769]: E0131 16:30:12.708090 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.723960 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lw4fx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"97c97bcd-bd44-4fdf-a90a-2d6be88c23e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f603aa4ef9fa0a10f90946e243cd0d25dcd6dba58c86a4378085addf26d95233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6q9l7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.
11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lw4fx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:12Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.742178 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21fa2db2-f448-487d-9ddb-ba4da28e8ffa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c
987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0131 16:29:26.255718 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0131 16:29:26.257930 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3522701725/tls.crt::/tmp/serving-cert-3522701725/tls.key\\\\\\\"\\\\nI0131 16:29:32.222719 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0131 16:29:32.227365 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0131 16:29:32.227391 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0131 16:29:32.227414 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0131 16:29:32.227419 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0131 16:29:32.234321 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0131 16:29:32.234340 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234349 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0131 16:29:32.234352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0131 16:29:32.234355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0131 16:29:32.234357 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0131 16:29:32.234523 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0131 16:29:32.237094 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:12Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.754009 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.754286 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.754464 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.754694 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.754863 4769 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:12Z","lastTransitionTime":"2026-01-31T16:30:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.761746 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"259af7b3-780a-4464-9596-a063fecea409\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc4c0f2131eeb3ea03f5572e41e6c9f86ef15f5d6b70880be88ae6219eadecab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7027bbe350b6afd5a1563f64215b882ff809f1474dcce7a15a843756b7595233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a05b68fb33a04a94268458bbaa7f891e2d493dbe5a27d7e12ced0d561a70e938\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controlle
r\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7cff45082b65da86bcca88d363692fdcaffbae44f33b5ae53a09b5aea7eff212\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7cff45082b65da86bcca88d363692fdcaffbae44f33b5ae53a09b5aea7eff212\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:12Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.783865 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79dca88fae791047a9091e1fcad57d33d0061bbb52395ed325e2658e5fd6ffc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:12Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.805437 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:12Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.829755 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef20199c8b7874b9d34bc38aed75d3d0dc669db3dfaff3218a1a4e249330daca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:12Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.851172 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:12Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.857863 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.857919 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.857947 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.857984 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.858010 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:12Z","lastTransitionTime":"2026-01-31T16:30:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.873743 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e5e34357e35a0bda20351c442465d7866c927dfad1a6c8acf1dc32b52a326ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb37e108557965f63da49a443c6667212f8ab8eb0a7099a0a106c45e00e78e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:12Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.890131 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-slrbh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"48d46c05-78b8-4355-9027-77efbbfbe87c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c68712921d6f140f1028a42cadcda30d38e3a9772ad3ca53e6d7b3b039433b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hqcpx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-slrbh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:12Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.906811 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb8dd1c1-8ad0-4df1-9eb7-f7e36509abac\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8f8d35bedc1d437cf45b14a416b9e5ed1610d7ecd30603f9a179406839220a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9c6bf8a15857982b68a045b8ca1d407a63ad3da192375596f26389d9983cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1712a54b482696918a3ddb7294c16ce3676cb56c2928b721be12bcd390085e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f22b718ef3d2b16061b2c0fe48c8fe612b3e2b63baa2925fce7a1ad9552b090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:12Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.928230 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g5kbw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a7cfe09-9892-494d-a420-5d720afb3df3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fb743faaf9717b716d96f7b36096ee11da25ac3eeeb601bdc6d8f20faf3a3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run
/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m86wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g5kbw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:12Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.943952 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kvc58" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e39e875a-bea7-4e27-af9a-f769a493efe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c9c0d071a489b7537159a33cfb524a36c03fb2b7dbb0bff1c4d966c26aa4a503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6shzp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a326f3c1d78ff42444724a0595281be656bfe3f92ff5716a089e7a6c6828d34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6shzp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kvc58\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:12Z is after 2025-08-24T17:21:41Z" Jan 31 
16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.962085 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.962208 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.962224 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.962290 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.962315 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:12Z","lastTransitionTime":"2026-01-31T16:30:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.963548 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:12Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:12 crc kubenswrapper[4769]: I0131 16:30:12.991630 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f9e971a-93ce-4a49-a970-a2789486d12c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f591a190eaf4b09cdb5961545cba8b3e469d2dd5b6b729627af238d6d8ebb410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rftqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:12Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.008139 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d352f75-43f7-4b8c-867e-cfb17bbbe011\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e57f0adc59ebcc50644648ee64395f6834328fec384b3d8c83b9758314ea18d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48035545fd929a672be1a83a941b13f4b352bdb858af6a412efec46dc7ac217\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-4bqbm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:13Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.042630 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86f2019b-d6ca-4e73-9dac-52fe746489cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0676e321ded7f968141e8b28596ec889f690e6
b66a6b10151501f9f83a7372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e0676e321ded7f968141e8b28596ec889f690e6b66a6b10151501f9f83a7372\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-31T16:29:58Z\\\",\\\"message\\\":\\\"try object setup: *v1.Pod openshift-multus/multus-additional-cni-plugins-rftqz\\\\nI0131 16:29:58.731451 6426 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-node-2r9tc in node crc\\\\nI0131 16:29:58.731460 6426 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/multus-additional-cni-plugins-rftqz\\\\nI0131 16:29:58.731476 6426 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-additional-cni-plugins-rftqz in node crc\\\\nI0131 16:29:58.731468 6426 obj_retry.go:386] Retry successful for *v1.Pod openshift-ovn-kubernetes/ovnkube-node-2r9tc after 0 failed attempt(s)\\\\nI0131 16:29:58.731486 6426 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-additional-cni-plugins-rftqz after 0 failed attempt(s)\\\\nI0131 16:29:58.731490 6426 default_network_controller.go:776] Recording success event on pod openshift-ovn-kubernetes/ovnkube-node-2r9tc\\\\nF0131 16:29:58.731517 6426 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: fail\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:57Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-2r9tc_openshift-ovn-kubernetes(86f2019b-d6ca-4e73-9dac-52fe746489cb)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2r9tc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:13Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.062481 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bl9cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"428b0729-22d7-4feb-a392-1ec77e5acbc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzcbr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzcbr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:47Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bl9cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:13Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.064691 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.064725 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.064739 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.064759 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.064772 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:13Z","lastTransitionTime":"2026-01-31T16:30:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.167192 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.167246 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.167261 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.167285 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.167302 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:13Z","lastTransitionTime":"2026-01-31T16:30:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.269995 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.270093 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.270127 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.270166 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.270193 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:13Z","lastTransitionTime":"2026-01-31T16:30:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.373098 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.373137 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.373145 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.373158 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.373167 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:13Z","lastTransitionTime":"2026-01-31T16:30:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.476931 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.477057 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.477090 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.477129 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.477156 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:13Z","lastTransitionTime":"2026-01-31T16:30:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.580996 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.581063 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.581082 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.581108 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.581127 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:13Z","lastTransitionTime":"2026-01-31T16:30:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.683191 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-16 19:29:49.419474419 +0000 UTC Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.684906 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.684992 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.685006 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.685029 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.685045 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:13Z","lastTransitionTime":"2026-01-31T16:30:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.707924 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:30:13 crc kubenswrapper[4769]: E0131 16:30:13.708132 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.788813 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.788863 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.789959 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.789993 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.790005 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:13Z","lastTransitionTime":"2026-01-31T16:30:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.892379 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.892424 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.892435 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.892452 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.892463 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:13Z","lastTransitionTime":"2026-01-31T16:30:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.995038 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.995085 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.995097 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.995115 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:13 crc kubenswrapper[4769]: I0131 16:30:13.995131 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:13Z","lastTransitionTime":"2026-01-31T16:30:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.097602 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.097651 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.097665 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.097684 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.097697 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:14Z","lastTransitionTime":"2026-01-31T16:30:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.199631 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.199669 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.199680 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.199700 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.199712 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:14Z","lastTransitionTime":"2026-01-31T16:30:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.302208 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.302276 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.302296 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.302322 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.302338 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:14Z","lastTransitionTime":"2026-01-31T16:30:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.405128 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.405179 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.405195 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.405218 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.405237 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:14Z","lastTransitionTime":"2026-01-31T16:30:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.507273 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.507324 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.507336 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.507358 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.507370 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:14Z","lastTransitionTime":"2026-01-31T16:30:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.609899 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.609941 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.609953 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.609970 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.609983 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:14Z","lastTransitionTime":"2026-01-31T16:30:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.684259 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-14 08:07:17.529099544 +0000 UTC Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.707717 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:30:14 crc kubenswrapper[4769]: E0131 16:30:14.707845 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.708568 4769 scope.go:117] "RemoveContainer" containerID="4e0676e321ded7f968141e8b28596ec889f690e6b66a6b10151501f9f83a7372" Jan 31 16:30:14 crc kubenswrapper[4769]: E0131 16:30:14.708718 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-2r9tc_openshift-ovn-kubernetes(86f2019b-d6ca-4e73-9dac-52fe746489cb)\"" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.708843 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:30:14 crc kubenswrapper[4769]: E0131 16:30:14.708894 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.709113 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:30:14 crc kubenswrapper[4769]: E0131 16:30:14.709162 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.712438 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.712465 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.712478 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.712538 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.712553 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:14Z","lastTransitionTime":"2026-01-31T16:30:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.815760 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.815811 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.815828 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.815852 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.815870 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:14Z","lastTransitionTime":"2026-01-31T16:30:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.918963 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.919001 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.919010 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.919024 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:14 crc kubenswrapper[4769]: I0131 16:30:14.919034 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:14Z","lastTransitionTime":"2026-01-31T16:30:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.021598 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.021635 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.021646 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.021663 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.021675 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:15Z","lastTransitionTime":"2026-01-31T16:30:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.124414 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.124449 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.124457 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.124470 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.124478 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:15Z","lastTransitionTime":"2026-01-31T16:30:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.226812 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.226843 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.226854 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.226866 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.226876 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:15Z","lastTransitionTime":"2026-01-31T16:30:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.329246 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.329325 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.329351 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.329382 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.329403 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:15Z","lastTransitionTime":"2026-01-31T16:30:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.432487 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.432561 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.432599 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.432616 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.432628 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:15Z","lastTransitionTime":"2026-01-31T16:30:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.536187 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.536271 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.536367 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.536422 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.536450 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:15Z","lastTransitionTime":"2026-01-31T16:30:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.639698 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.639780 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.639844 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.639868 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.639885 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:15Z","lastTransitionTime":"2026-01-31T16:30:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.685190 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-19 19:07:09.826583589 +0000 UTC Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.707223 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:30:15 crc kubenswrapper[4769]: E0131 16:30:15.707351 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.742617 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.742684 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.742703 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.742729 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.742748 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:15Z","lastTransitionTime":"2026-01-31T16:30:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.845777 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.845805 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.845813 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.845826 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.845853 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:15Z","lastTransitionTime":"2026-01-31T16:30:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.948961 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.949013 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.949022 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.949034 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:15 crc kubenswrapper[4769]: I0131 16:30:15.949043 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:15Z","lastTransitionTime":"2026-01-31T16:30:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.052467 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.052534 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.052544 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.052561 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.052569 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:16Z","lastTransitionTime":"2026-01-31T16:30:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.156894 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.156930 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.156941 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.156958 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.156969 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:16Z","lastTransitionTime":"2026-01-31T16:30:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.259572 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.259601 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.259629 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.259648 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.259659 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:16Z","lastTransitionTime":"2026-01-31T16:30:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.362380 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.362450 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.362462 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.362479 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.362491 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:16Z","lastTransitionTime":"2026-01-31T16:30:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.464874 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.464912 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.464924 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.464940 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.464950 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:16Z","lastTransitionTime":"2026-01-31T16:30:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.568280 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.568338 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.568354 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.568377 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.568394 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:16Z","lastTransitionTime":"2026-01-31T16:30:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.671562 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.672114 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.672317 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.672449 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.672626 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:16Z","lastTransitionTime":"2026-01-31T16:30:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.685912 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-25 04:45:58.162989235 +0000 UTC Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.707308 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.707361 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:30:16 crc kubenswrapper[4769]: E0131 16:30:16.707544 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.707590 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:30:16 crc kubenswrapper[4769]: E0131 16:30:16.707754 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:30:16 crc kubenswrapper[4769]: E0131 16:30:16.707869 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.774723 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.774774 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.774792 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.774811 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.774826 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:16Z","lastTransitionTime":"2026-01-31T16:30:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.877102 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.877203 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.877223 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.877247 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.877265 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:16Z","lastTransitionTime":"2026-01-31T16:30:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.980585 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.980628 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.980637 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.980651 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:16 crc kubenswrapper[4769]: I0131 16:30:16.980659 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:16Z","lastTransitionTime":"2026-01-31T16:30:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:17 crc kubenswrapper[4769]: I0131 16:30:17.083865 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:17 crc kubenswrapper[4769]: I0131 16:30:17.083928 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:17 crc kubenswrapper[4769]: I0131 16:30:17.083946 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:17 crc kubenswrapper[4769]: I0131 16:30:17.083969 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:17 crc kubenswrapper[4769]: I0131 16:30:17.083986 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:17Z","lastTransitionTime":"2026-01-31T16:30:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:17 crc kubenswrapper[4769]: I0131 16:30:17.187782 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:17 crc kubenswrapper[4769]: I0131 16:30:17.187828 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:17 crc kubenswrapper[4769]: I0131 16:30:17.187837 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:17 crc kubenswrapper[4769]: I0131 16:30:17.187854 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:17 crc kubenswrapper[4769]: I0131 16:30:17.187863 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:17Z","lastTransitionTime":"2026-01-31T16:30:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:17 crc kubenswrapper[4769]: I0131 16:30:17.294700 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:17 crc kubenswrapper[4769]: I0131 16:30:17.294755 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:17 crc kubenswrapper[4769]: I0131 16:30:17.294767 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:17 crc kubenswrapper[4769]: I0131 16:30:17.294786 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:17 crc kubenswrapper[4769]: I0131 16:30:17.294798 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:17Z","lastTransitionTime":"2026-01-31T16:30:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:17 crc kubenswrapper[4769]: I0131 16:30:17.396438 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:17 crc kubenswrapper[4769]: I0131 16:30:17.396521 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:17 crc kubenswrapper[4769]: I0131 16:30:17.396533 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:17 crc kubenswrapper[4769]: I0131 16:30:17.396548 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:17 crc kubenswrapper[4769]: I0131 16:30:17.396558 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:17Z","lastTransitionTime":"2026-01-31T16:30:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:17 crc kubenswrapper[4769]: I0131 16:30:17.499420 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:17 crc kubenswrapper[4769]: I0131 16:30:17.499537 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:17 crc kubenswrapper[4769]: I0131 16:30:17.499556 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:17 crc kubenswrapper[4769]: I0131 16:30:17.499584 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:17 crc kubenswrapper[4769]: I0131 16:30:17.499603 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:17Z","lastTransitionTime":"2026-01-31T16:30:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:17 crc kubenswrapper[4769]: I0131 16:30:17.602450 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:17 crc kubenswrapper[4769]: I0131 16:30:17.602529 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:17 crc kubenswrapper[4769]: I0131 16:30:17.602539 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:17 crc kubenswrapper[4769]: I0131 16:30:17.602556 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:17 crc kubenswrapper[4769]: I0131 16:30:17.602566 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:17Z","lastTransitionTime":"2026-01-31T16:30:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:17 crc kubenswrapper[4769]: I0131 16:30:17.686044 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-13 14:10:40.828290551 +0000 UTC Jan 31 16:30:17 crc kubenswrapper[4769]: I0131 16:30:17.704746 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:17 crc kubenswrapper[4769]: I0131 16:30:17.704792 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:17 crc kubenswrapper[4769]: I0131 16:30:17.704804 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:17 crc kubenswrapper[4769]: I0131 16:30:17.704821 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:17 crc kubenswrapper[4769]: I0131 16:30:17.704833 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:17Z","lastTransitionTime":"2026-01-31T16:30:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:17 crc kubenswrapper[4769]: I0131 16:30:17.707994 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:30:17 crc kubenswrapper[4769]: E0131 16:30:17.708126 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:30:17 crc kubenswrapper[4769]: I0131 16:30:17.807852 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:17 crc kubenswrapper[4769]: I0131 16:30:17.807892 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:17 crc kubenswrapper[4769]: I0131 16:30:17.807901 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:17 crc kubenswrapper[4769]: I0131 16:30:17.807919 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:17 crc kubenswrapper[4769]: I0131 16:30:17.807928 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:17Z","lastTransitionTime":"2026-01-31T16:30:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:17 crc kubenswrapper[4769]: I0131 16:30:17.910295 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:17 crc kubenswrapper[4769]: I0131 16:30:17.910339 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:17 crc kubenswrapper[4769]: I0131 16:30:17.910348 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:17 crc kubenswrapper[4769]: I0131 16:30:17.910363 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:17 crc kubenswrapper[4769]: I0131 16:30:17.910372 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:17Z","lastTransitionTime":"2026-01-31T16:30:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.013476 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.013543 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.013553 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.013571 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.013580 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:18Z","lastTransitionTime":"2026-01-31T16:30:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.118648 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.118724 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.118746 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.118778 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.118799 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:18Z","lastTransitionTime":"2026-01-31T16:30:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.221686 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.221758 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.221778 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.221802 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.221816 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:18Z","lastTransitionTime":"2026-01-31T16:30:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.325565 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.325612 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.325622 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.325637 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.325647 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:18Z","lastTransitionTime":"2026-01-31T16:30:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.428670 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.428734 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.428763 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.428793 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.428815 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:18Z","lastTransitionTime":"2026-01-31T16:30:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.531883 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.531958 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.531978 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.532004 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.532022 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:18Z","lastTransitionTime":"2026-01-31T16:30:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.634879 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.635203 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.635331 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.635468 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.635661 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:18Z","lastTransitionTime":"2026-01-31T16:30:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.686676 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-24 00:50:59.840877311 +0000 UTC Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.708346 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.708931 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.708992 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:30:18 crc kubenswrapper[4769]: E0131 16:30:18.709111 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:30:18 crc kubenswrapper[4769]: E0131 16:30:18.709392 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:30:18 crc kubenswrapper[4769]: E0131 16:30:18.709598 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.738692 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.738748 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.738763 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.738783 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.738796 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:18Z","lastTransitionTime":"2026-01-31T16:30:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.842760 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.842836 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.842854 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.842888 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.842910 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:18Z","lastTransitionTime":"2026-01-31T16:30:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.947275 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.947353 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.947373 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.947405 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:18 crc kubenswrapper[4769]: I0131 16:30:18.947429 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:18Z","lastTransitionTime":"2026-01-31T16:30:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.050766 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.050940 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.050957 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.050988 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.051008 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:19Z","lastTransitionTime":"2026-01-31T16:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.154273 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.154325 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.154334 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.154353 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.154364 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:19Z","lastTransitionTime":"2026-01-31T16:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.257242 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.257350 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.257374 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.257410 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.257436 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:19Z","lastTransitionTime":"2026-01-31T16:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.361210 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.361287 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.361305 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.361333 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.361356 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:19Z","lastTransitionTime":"2026-01-31T16:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.442369 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/428b0729-22d7-4feb-a392-1ec77e5acbc0-metrics-certs\") pod \"network-metrics-daemon-bl9cd\" (UID: \"428b0729-22d7-4feb-a392-1ec77e5acbc0\") " pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:30:19 crc kubenswrapper[4769]: E0131 16:30:19.442703 4769 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 31 16:30:19 crc kubenswrapper[4769]: E0131 16:30:19.442893 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/428b0729-22d7-4feb-a392-1ec77e5acbc0-metrics-certs podName:428b0729-22d7-4feb-a392-1ec77e5acbc0 nodeName:}" failed. No retries permitted until 2026-01-31 16:30:51.442854995 +0000 UTC m=+99.517023904 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/428b0729-22d7-4feb-a392-1ec77e5acbc0-metrics-certs") pod "network-metrics-daemon-bl9cd" (UID: "428b0729-22d7-4feb-a392-1ec77e5acbc0") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.464555 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.464609 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.464622 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.464640 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.464654 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:19Z","lastTransitionTime":"2026-01-31T16:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.568488 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.568880 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.568894 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.568918 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.568930 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:19Z","lastTransitionTime":"2026-01-31T16:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.651757 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.651973 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.651996 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.652022 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.652042 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:19Z","lastTransitionTime":"2026-01-31T16:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:19 crc kubenswrapper[4769]: E0131 16:30:19.665878 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a4704f7-ede0-4833-ba79-415de5d798cc\\\",\\\"systemUUID\\\":\\\"e3275d1e-5ae6-4e54-b0fa-71e35cbe4ac0\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:19Z is after 
2025-08-24T17:21:41Z" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.670821 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.670882 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.670905 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.670937 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.670960 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:19Z","lastTransitionTime":"2026-01-31T16:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.711401 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:30:19 crc kubenswrapper[4769]: E0131 16:30:19.711774 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.712415 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-29 22:31:41.160122088 +0000 UTC Jan 31 16:30:19 crc kubenswrapper[4769]: E0131 16:30:19.725840 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056
b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951
},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a4704f7-ede0-4833-ba79-415de5d798cc\\\",\\\"systemUUID\\\":\\\"e3275d1e-5ae6-4e54-b0fa-71e35cbe4ac0\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"cru
n\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:19Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.729937 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.729999 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.730024 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.730074 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.730104 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:19Z","lastTransitionTime":"2026-01-31T16:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:19 crc kubenswrapper[4769]: E0131 16:30:19.746189 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a4704f7-ede0-4833-ba79-415de5d798cc\\\",\\\"systemUUID\\\":\\\"e3275d1e-5ae6-4e54-b0fa-71e35cbe4ac0\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:19Z is after 
2025-08-24T17:21:41Z" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.750923 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.751048 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.751107 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.751138 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.751160 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:19Z","lastTransitionTime":"2026-01-31T16:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:19 crc kubenswrapper[4769]: E0131 16:30:19.769279 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a4704f7-ede0-4833-ba79-415de5d798cc\\\",\\\"systemUUID\\\":\\\"e3275d1e-5ae6-4e54-b0fa-71e35cbe4ac0\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:19Z is after 
2025-08-24T17:21:41Z" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.773492 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.773592 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.773613 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.773642 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.773660 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:19Z","lastTransitionTime":"2026-01-31T16:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:19 crc kubenswrapper[4769]: E0131 16:30:19.789401 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a4704f7-ede0-4833-ba79-415de5d798cc\\\",\\\"systemUUID\\\":\\\"e3275d1e-5ae6-4e54-b0fa-71e35cbe4ac0\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:19Z is after 
2025-08-24T17:21:41Z" Jan 31 16:30:19 crc kubenswrapper[4769]: E0131 16:30:19.789552 4769 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.791575 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.791610 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.791619 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.791633 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.791644 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:19Z","lastTransitionTime":"2026-01-31T16:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.894162 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.894198 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.894206 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.894221 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.894230 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:19Z","lastTransitionTime":"2026-01-31T16:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.997268 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.997320 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.997338 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.997359 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:19 crc kubenswrapper[4769]: I0131 16:30:19.997377 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:19Z","lastTransitionTime":"2026-01-31T16:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.100279 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.100360 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.100379 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.100405 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.100425 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:20Z","lastTransitionTime":"2026-01-31T16:30:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.203800 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.203857 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.203874 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.203899 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.203916 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:20Z","lastTransitionTime":"2026-01-31T16:30:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.306233 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.306273 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.306282 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.306297 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.306306 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:20Z","lastTransitionTime":"2026-01-31T16:30:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.408890 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.408954 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.408971 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.408996 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.409012 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:20Z","lastTransitionTime":"2026-01-31T16:30:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.512436 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.512552 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.512578 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.512616 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.512637 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:20Z","lastTransitionTime":"2026-01-31T16:30:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.616152 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.616220 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.616243 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.616272 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.616300 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:20Z","lastTransitionTime":"2026-01-31T16:30:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.708117 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.708154 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.708162 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:30:20 crc kubenswrapper[4769]: E0131 16:30:20.708322 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:30:20 crc kubenswrapper[4769]: E0131 16:30:20.708443 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:30:20 crc kubenswrapper[4769]: E0131 16:30:20.708676 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.712559 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-03 13:42:34.69001039 +0000 UTC Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.718767 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.718806 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.718822 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.718846 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.718864 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:20Z","lastTransitionTime":"2026-01-31T16:30:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.821057 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.821087 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.821103 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.821124 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.821140 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:20Z","lastTransitionTime":"2026-01-31T16:30:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.924039 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.924089 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.924102 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.924118 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:20 crc kubenswrapper[4769]: I0131 16:30:20.924131 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:20Z","lastTransitionTime":"2026-01-31T16:30:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.026450 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.026530 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.026547 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.026569 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.026584 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:21Z","lastTransitionTime":"2026-01-31T16:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.132923 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.132977 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.132990 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.133008 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.133019 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:21Z","lastTransitionTime":"2026-01-31T16:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.137528 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-g5kbw_4a7cfe09-9892-494d-a420-5d720afb3df3/kube-multus/0.log" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.137579 4769 generic.go:334] "Generic (PLEG): container finished" podID="4a7cfe09-9892-494d-a420-5d720afb3df3" containerID="e1fb743faaf9717b716d96f7b36096ee11da25ac3eeeb601bdc6d8f20faf3a3f" exitCode=1 Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.137611 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-g5kbw" event={"ID":"4a7cfe09-9892-494d-a420-5d720afb3df3","Type":"ContainerDied","Data":"e1fb743faaf9717b716d96f7b36096ee11da25ac3eeeb601bdc6d8f20faf3a3f"} Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.138002 4769 scope.go:117] "RemoveContainer" containerID="e1fb743faaf9717b716d96f7b36096ee11da25ac3eeeb601bdc6d8f20faf3a3f" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.157304 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21fa2db2-f448-487d-9ddb-ba4da28e8ffa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":
{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0131 16:29:26.255718 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0131 16:29:26.257930 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3522701725/tls.crt::/tmp/serving-cert-3522701725/tls.key\\\\\\\"\\\\nI0131 16:29:32.222719 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0131 16:29:32.227365 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0131 16:29:32.227391 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0131 16:29:32.227414 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0131 16:29:32.227419 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0131 16:29:32.234321 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0131 16:29:32.234340 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234349 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0131 16:29:32.234352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0131 16:29:32.234355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0131 16:29:32.234357 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' 
detected.\\\\nI0131 16:29:32.234523 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0131 16:29:32.237094 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:21Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.169386 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"259af7b3-780a-4464-9596-a063fecea409\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc4c0f2131eeb3ea03f5572e41e6c9f86ef15f5d6b70880be88ae6219eadecab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7027bbe350b6afd5a1563f64215b882ff809f1474dcce7a15a843756b7595233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a05b68fb33a04a94268458bbaa7f891e2d493dbe5a27d7e12ced0d561a70e938\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7cff45082b65da86bcca88d363692fdcaffbae44f33b5ae53a09b5aea7eff212\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7cff45082b65da86bcca88d363692fdcaffbae44f33b5ae53a09b5aea7eff212\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:21Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.183631 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79dca88fae791047a9091e1fcad57d33d0061bbb52395ed325e2658e5fd6ffc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:21Z is after 
2025-08-24T17:21:41Z" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.196287 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:21Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.211437 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef20199c8b7874b9d34bc38aed75d3d0dc669db3dfaff3218a1a4e249330daca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:21Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.222376 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lw4fx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"97c97bcd-bd44-4fdf-a90a-2d6be88c23e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f603aa4ef9fa0a10f90946e243cd0d25dcd6dba58c86a4378085addf26d95233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6q9l7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lw4fx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:21Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.235558 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.235592 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.235602 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.235618 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.235631 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:21Z","lastTransitionTime":"2026-01-31T16:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.238088 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:21Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.249775 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e5e34357e35a0bda20351c442465d7866c927dfad1a6c8acf1dc32b52a326ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb37e108557965f63da49a443c6667212f8ab8eb0a7099a0a106c45e00e78e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:21Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.260981 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-slrbh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"48d46c05-78b8-4355-9027-77efbbfbe87c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c68712921d6f140f1028a42cadcda30d38e3a9772ad3ca53e6d7b3b039433b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hqcpx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-slrbh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:21Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.274656 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb8dd1c1-8ad0-4df1-9eb7-f7e36509abac\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8f8d35bedc1d437cf45b14a416b9e5ed1610d7ecd30603f9a179406839220a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9c6bf8a15857982b68a045b8ca1d407a63ad3da192375596f26389d9983cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1712a54b482696918a3ddb7294c16ce3676cb56c2928b721be12bcd390085e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f22b718ef3d2b16061b2c0fe48c8fe612b3e2b63baa2925fce7a1ad9552b090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:21Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.292321 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g5kbw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a7cfe09-9892-494d-a420-5d720afb3df3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:21Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e1fb743faaf9717b716d96f7b36096ee11da25ac3eeeb601bdc6d8f20faf3a3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e1fb743faaf9717b716d96f7b36096ee11da25ac3eeeb601bdc6d8f20faf3a3f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-31T16:30:20Z\\\",\\\"message\\\":\\\"2026-01-31T16:29:35+00:00 [cnibincopy] Successfully copied files in 
/usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_7c3980f7-b7ed-4a09-9300-a99efa85ce97\\\\n2026-01-31T16:29:35+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_7c3980f7-b7ed-4a09-9300-a99efa85ce97 to /host/opt/cni/bin/\\\\n2026-01-31T16:29:35Z [verbose] multus-daemon started\\\\n2026-01-31T16:29:35Z [verbose] Readiness Indicator file check\\\\n2026-01-31T16:30:20Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m86wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g5kbw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:21Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.313063 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kvc58" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e39e875a-bea7-4e27-af9a-f769a493efe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c9c0d071a489b7537159a33cfb524a36c03fb2b7dbb0bff1c4d966c26aa4a503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6shzp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a326f3c1d78ff42444724a0595281be656bfe3f92ff5716a089e7a6c6828d34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6shzp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kvc58\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:21Z is after 2025-08-24T17:21:41Z" Jan 31 
16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.328174 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:21Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.337791 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.337820 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.337828 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.337841 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.337850 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:21Z","lastTransitionTime":"2026-01-31T16:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.346267 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f9e971a-93ce-4a49-a970-a2789486d12c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f591a190eaf4b09cdb5961545cba8b3e469d2dd5b6b729627af238d6d8ebb410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnl
y\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01
-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rftqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:21Z is after 2025-08-24T17:21:41Z" Jan 31 
16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.361571 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d352f75-43f7-4b8c-867e-cfb17bbbe011\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e57f0adc59ebcc50644648ee64395f6834328fec384b3d8c83b9758314ea18d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48035545fd929a672be1a83a941b13f4b352bdb858af6a412efec46dc7ac217\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-4bqbm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2026-01-31T16:30:21Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.387562 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86f2019b-d6ca-4e73-9dac-52fe746489cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20994
82919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0676e321ded7f968141e8b28596ec889f690e6b66a6b10151501f9f83a7372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e0676e321ded7f968141e8b28596ec889f690e6b66a6b10151501f9f83a7372\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-31T16:29:58Z\\\",\\\"message\\\":\\\"try object setup: *v1.Pod openshift-multus/multus-additional-cni-plugins-rftqz\\\\nI0131 16:29:58.731451 6426 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-node-2r9tc in node crc\\\\nI0131 16:29:58.731460 6426 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/multus-additional-cni-plugins-rftqz\\\\nI0131 16:29:58.731476 6426 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-additional-cni-plugins-rftqz in node crc\\\\nI0131 16:29:58.731468 6426 obj_retry.go:386] Retry successful for *v1.Pod openshift-ovn-kubernetes/ovnkube-node-2r9tc after 0 failed attempt(s)\\\\nI0131 16:29:58.731486 6426 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-additional-cni-plugins-rftqz after 0 failed attempt(s)\\\\nI0131 16:29:58.731490 6426 default_network_controller.go:776] Recording success event on pod openshift-ovn-kubernetes/ovnkube-node-2r9tc\\\\nF0131 16:29:58.731517 6426 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: fail\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:57Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed 
container=ovnkube-controller pod=ovnkube-node-2r9tc_openshift-ovn-kubernetes(86f2019b-d6ca-4e73-9dac-52fe746489cb)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"image\\\":\
\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2r9tc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:21Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.403093 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bl9cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"428b0729-22d7-4feb-a392-1ec77e5acbc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzcbr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzcbr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:47Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bl9cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:21Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.440080 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.440110 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.440120 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.440134 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.440144 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:21Z","lastTransitionTime":"2026-01-31T16:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.543113 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.543184 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.543206 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.543231 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.543249 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:21Z","lastTransitionTime":"2026-01-31T16:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.646064 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.646097 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.646107 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.646121 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.646130 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:21Z","lastTransitionTime":"2026-01-31T16:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.707753 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:30:21 crc kubenswrapper[4769]: E0131 16:30:21.707929 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.713593 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-14 06:05:54.287348755 +0000 UTC Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.748383 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.748425 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.748434 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.748448 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.748459 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:21Z","lastTransitionTime":"2026-01-31T16:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.851090 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.851136 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.851152 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.851175 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.851195 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:21Z","lastTransitionTime":"2026-01-31T16:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.953211 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.953253 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.953269 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.953288 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:21 crc kubenswrapper[4769]: I0131 16:30:21.953305 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:21Z","lastTransitionTime":"2026-01-31T16:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.055418 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.055457 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.055467 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.055482 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.055512 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:22Z","lastTransitionTime":"2026-01-31T16:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.143060 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-g5kbw_4a7cfe09-9892-494d-a420-5d720afb3df3/kube-multus/0.log" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.143117 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-g5kbw" event={"ID":"4a7cfe09-9892-494d-a420-5d720afb3df3","Type":"ContainerStarted","Data":"eb4ceb742b812d9b282ba14b266b4a78550b5dc38d7637c07d1c95256799bc40"} Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.157828 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:22Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.158290 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.158353 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.158373 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.158409 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.158427 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:22Z","lastTransitionTime":"2026-01-31T16:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.175894 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e5e34357e35a0bda20351c442465d7866c927dfad1a6c8acf1dc32b52a326ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb37e108557965f63da49a443c6667212f8ab8eb0a7099a0a106c45e00e78e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:22Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.186639 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-slrbh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"48d46c05-78b8-4355-9027-77efbbfbe87c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c68712921d6f140f1028a42cadcda30d38e3a9772ad3ca53e6d7b3b039433b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hqcpx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-slrbh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:22Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.206264 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb8dd1c1-8ad0-4df1-9eb7-f7e36509abac\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8f8d35bedc1d437cf45b14a416b9e5ed1610d7ecd30603f9a179406839220a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9c6bf8a15857982b68a045b8ca1d407a63ad3da192375596f26389d9983cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1712a54b482696918a3ddb7294c16ce3676cb56c2928b721be12bcd390085e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f22b718ef3d2b16061b2c0fe48c8fe612b3e2b63baa2925fce7a1ad9552b090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:22Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.219575 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g5kbw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a7cfe09-9892-494d-a420-5d720afb3df3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb4ceb742b812d9b282ba14b266b4a78550b5dc38d7637c07d1c95256799bc40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e1fb743faaf9717b716d96f7b36096ee11da25ac3eeeb601bdc6d8f20faf3a3f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-31T16:30:20Z\\\",\\\"message\\\":\\\"2026-01-31T16:29:35+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_7c3980f7-b7ed-4a09-9300-a99efa85ce97\\\\n2026-01-31T16:29:35+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_7c3980f7-b7ed-4a09-9300-a99efa85ce97 to /host/opt/cni/bin/\\\\n2026-01-31T16:29:35Z [verbose] multus-daemon started\\\\n2026-01-31T16:29:35Z [verbose] Readiness 
Indicator file check\\\\n2026-01-31T16:30:20Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m86wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g5kbw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:22Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.237078 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kvc58" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e39e875a-bea7-4e27-af9a-f769a493efe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c9c0d071a489b7537159a33cfb524a36c03fb2b7dbb0bff1c4d966c26aa4a503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6shzp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a326f3c1d78ff42444724a0595281be656bfe3f92ff5716a089e7a6c6828d34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6shzp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kvc58\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:22Z is after 2025-08-24T17:21:41Z" Jan 31 
16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.253004 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:22Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.260823 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.260852 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.260862 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.260877 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.260888 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:22Z","lastTransitionTime":"2026-01-31T16:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.271823 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f9e971a-93ce-4a49-a970-a2789486d12c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f591a190eaf4b09cdb5961545cba8b3e469d2dd5b6b729627af238d6d8ebb410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnl
y\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01
-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rftqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:22Z is after 2025-08-24T17:21:41Z" Jan 31 
16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.288201 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d352f75-43f7-4b8c-867e-cfb17bbbe011\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e57f0adc59ebcc50644648ee64395f6834328fec384b3d8c83b9758314ea18d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48035545fd929a672be1a83a941b13f4b352bdb858af6a412efec46dc7ac217\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-4bqbm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2026-01-31T16:30:22Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.306131 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86f2019b-d6ca-4e73-9dac-52fe746489cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20994
82919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0676e321ded7f968141e8b28596ec889f690e6b66a6b10151501f9f83a7372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e0676e321ded7f968141e8b28596ec889f690e6b66a6b10151501f9f83a7372\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-31T16:29:58Z\\\",\\\"message\\\":\\\"try object setup: *v1.Pod openshift-multus/multus-additional-cni-plugins-rftqz\\\\nI0131 16:29:58.731451 6426 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-node-2r9tc in node crc\\\\nI0131 16:29:58.731460 6426 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/multus-additional-cni-plugins-rftqz\\\\nI0131 16:29:58.731476 6426 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-additional-cni-plugins-rftqz in node crc\\\\nI0131 16:29:58.731468 6426 obj_retry.go:386] Retry successful for *v1.Pod openshift-ovn-kubernetes/ovnkube-node-2r9tc after 0 failed attempt(s)\\\\nI0131 16:29:58.731486 6426 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-additional-cni-plugins-rftqz after 0 failed attempt(s)\\\\nI0131 16:29:58.731490 6426 default_network_controller.go:776] Recording success event on pod openshift-ovn-kubernetes/ovnkube-node-2r9tc\\\\nF0131 16:29:58.731517 6426 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: fail\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:57Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed 
container=ovnkube-controller pod=ovnkube-node-2r9tc_openshift-ovn-kubernetes(86f2019b-d6ca-4e73-9dac-52fe746489cb)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"image\\\":\
\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2r9tc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:22Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.316041 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bl9cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"428b0729-22d7-4feb-a392-1ec77e5acbc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzcbr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzcbr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:47Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bl9cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:22Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.327901 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"21fa2db2-f448-487d-9ddb-ba4da28e8ffa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0131 16:29:26.255718 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0131 16:29:26.257930 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3522701725/tls.crt::/tmp/serving-cert-3522701725/tls.key\\\\\\\"\\\\nI0131 16:29:32.222719 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0131 16:29:32.227365 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0131 16:29:32.227391 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0131 16:29:32.227414 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0131 16:29:32.227419 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0131 16:29:32.234321 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0131 16:29:32.234340 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234349 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0131 16:29:32.234352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0131 16:29:32.234355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0131 16:29:32.234357 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0131 16:29:32.234523 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0131 16:29:32.237094 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:22Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.338236 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"259af7b3-780a-4464-9596-a063fecea409\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc4c0f2131eeb3ea03f5572e41e6c9f86ef15f5d6b70880be88ae6219eadecab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7027bbe350b6afd5a1563f64215b882ff809f1474dcce7a15a843756b7595233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a05b68fb33a04a94268458bbaa7f891e2d493dbe5a27d7e12ced0d561a70e938\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7cff45082b65da86bcca88d363692fdcaffbae44f33b5ae53a09b5aea7eff212\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7cff45082b65da86bcca88d363692fdcaffbae44f33b5ae53a09b5aea7eff212\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:22Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.354794 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79dca88fae791047a9091e1fcad57d33d0061bbb52395ed325e2658e5fd6ffc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:22Z is after 
2025-08-24T17:21:41Z" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.363358 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.363402 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.363418 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.363442 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.363459 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:22Z","lastTransitionTime":"2026-01-31T16:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.366611 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:22Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.379102 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef20199c8b7874b9d34bc38aed75d3d0dc669db3dfaff3218a1a4e249330daca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:22Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.390400 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lw4fx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"97c97bcd-bd44-4fdf-a90a-2d6be88c23e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f603aa4ef9fa0a10f90946e243cd0d25dcd6dba58c86a4378085addf26d95233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6q9l7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lw4fx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:22Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.465169 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.465222 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.465235 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.465253 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.465266 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:22Z","lastTransitionTime":"2026-01-31T16:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.567382 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.567430 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.567442 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.567453 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.567461 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:22Z","lastTransitionTime":"2026-01-31T16:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.668908 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.668956 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.668972 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.668994 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.669009 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:22Z","lastTransitionTime":"2026-01-31T16:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.707667 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.707725 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.707739 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:30:22 crc kubenswrapper[4769]: E0131 16:30:22.707811 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:30:22 crc kubenswrapper[4769]: E0131 16:30:22.707968 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:30:22 crc kubenswrapper[4769]: E0131 16:30:22.708092 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.713865 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-06 22:43:08.165954563 +0000 UTC Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.726060 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:22Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.742508 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e5e34357e35a0bda20351c442465d7866c927dfad1a6c8acf1dc32b52a326ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb37e108557965f63da49a443c6667212f8ab8eb0a7099a0a106c45e00e78e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:22Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.755107 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-slrbh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"48d46c05-78b8-4355-9027-77efbbfbe87c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c68712921d6f140f1028a42cadcda30d38e3a9772ad3ca53e6d7b3b039433b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hqcpx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-slrbh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:22Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.769258 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb8dd1c1-8ad0-4df1-9eb7-f7e36509abac\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8f8d35bedc1d437cf45b14a416b9e5ed1610d7ecd30603f9a179406839220a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9c6bf8a15857982b68a045b8ca1d407a63ad3da192375596f26389d9983cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1712a54b482696918a3ddb7294c16ce3676cb56c2928b721be12bcd390085e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f22b718ef3d2b16061b2c0fe48c8fe612b3e2b63baa2925fce7a1ad9552b090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:22Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.770837 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.770874 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.770888 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.770910 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.770924 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:22Z","lastTransitionTime":"2026-01-31T16:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.782823 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g5kbw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a7cfe09-9892-494d-a420-5d720afb3df3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb4ceb742b812d9b282ba14b266b4a78550b5dc38d7637c07d1c95256799bc40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e1fb743faaf9717b716d96f7b36096ee11da25ac3eeeb601bdc6d8f20faf3a3f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-31T16:30:20Z\\\",\\\"message\\\":\\\"2026-01-31T16:29:35+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_7c3980f7-b7ed-4a09-9300-a99efa85ce97\\\\n2026-01-31T16:29:35+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_7c3980f7-b7ed-4a09-9300-a99efa85ce97 to /host/opt/cni/bin/\\\\n2026-01-31T16:29:35Z [verbose] multus-daemon started\\\\n2026-01-31T16:29:35Z [verbose] Readiness Indicator file check\\\\n2026-01-31T16:30:20Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m86wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g5kbw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:22Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.795687 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kvc58" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e39e875a-bea7-4e27-af9a-f769a493efe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c9c0d071a489b7537159a33cfb524a36c03fb2b7dbb0bff1c4d966c26aa4a503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6shzp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a326f3c1d78ff42444724a0595281be656bfe3f92ff5716a089e7a6c6828d34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6shzp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kvc58\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:22Z is after 2025-08-24T17:21:41Z" Jan 31 
16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.810141 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:22Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.826269 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f9e971a-93ce-4a49-a970-a2789486d12c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f591a190eaf4b09cdb5961545cba8b3e469d2dd5b6b729627af238d6d8ebb410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rftqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:22Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.839001 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d352f75-43f7-4b8c-867e-cfb17bbbe011\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e57f0adc59ebcc50644648ee64395f6834328fec384b3d8c83b9758314ea18d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48035545fd929a672be1a83a941b13f4b352bdb858af6a412efec46dc7ac217\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-4bqbm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:22Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.859471 4769 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86f2019b-d6ca-4e73-9dac-52fe746489cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0676e321ded7f968141e8b28596ec889f690e6b66a6b10151501f9f83a7372\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e0676e321ded7f968141e8b28596ec889f690e6b66a6b10151501f9f83a7372\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-31T16:29:58Z\\\",\\\"message\\\":\\\"try object setup: *v1.Pod openshift-multus/multus-additional-cni-plugins-rftqz\\\\nI0131 16:29:58.731451 6426 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-node-2r9tc in node crc\\\\nI0131 16:29:58.731460 6426 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/multus-additional-cni-plugins-rftqz\\\\nI0131 16:29:58.731476 6426 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-additional-cni-plugins-rftqz in node crc\\\\nI0131 16:29:58.731468 6426 obj_retry.go:386] Retry successful for *v1.Pod openshift-ovn-kubernetes/ovnkube-node-2r9tc after 0 failed attempt(s)\\\\nI0131 16:29:58.731486 6426 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-additional-cni-plugins-rftqz after 0 failed attempt(s)\\\\nI0131 16:29:58.731490 6426 default_network_controller.go:776] Recording success event on pod openshift-ovn-kubernetes/ovnkube-node-2r9tc\\\\nF0131 16:29:58.731517 6426 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: fail\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:57Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-2r9tc_openshift-ovn-kubernetes(86f2019b-d6ca-4e73-9dac-52fe746489cb)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2r9tc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:22Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.871070 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bl9cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"428b0729-22d7-4feb-a392-1ec77e5acbc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzcbr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzcbr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:47Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bl9cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:22Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.873463 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.873515 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.873527 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.873545 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.873557 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:22Z","lastTransitionTime":"2026-01-31T16:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.883879 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21fa2db2-f448-487d-9ddb-ba4da28e8ffa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0131 16:29:26.255718 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0131 16:29:26.257930 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3522701725/tls.crt::/tmp/serving-cert-3522701725/tls.key\\\\\\\"\\\\nI0131 16:29:32.222719 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0131 16:29:32.227365 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0131 16:29:32.227391 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0131 16:29:32.227414 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0131 16:29:32.227419 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0131 16:29:32.234321 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0131 16:29:32.234340 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234349 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0131 16:29:32.234352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0131 16:29:32.234355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0131 16:29:32.234357 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0131 16:29:32.234523 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0131 16:29:32.237094 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:22Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.896621 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"259af7b3-780a-4464-9596-a063fecea409\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc4c0f2131eeb3ea03f5572e41e6c9f86ef15f5d6b70880be88ae6219eadecab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7027bbe350b6afd5a1563f64215b882ff809f1474dcce7a15a843756b7595233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a05b68fb33a04a94268458bbaa7f891e2d493dbe5a27d7e12ced0d561a70e938\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7cff45082b65da86bcca88d363692fdcaffbae44f33b5ae53a09b5aea7eff212\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7cff45082b65da86bcca88d363692fdcaffbae44f33b5ae53a09b5aea7eff212\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:22Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.909661 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79dca88fae791047a9091e1fcad57d33d0061bbb52395ed325e2658e5fd6ffc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:22Z is after 
2025-08-24T17:21:41Z" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.922389 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:22Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.932347 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef20199c8b7874b9d34bc38aed75d3d0dc669db3dfaff3218a1a4e249330daca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:22Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.944565 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lw4fx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"97c97bcd-bd44-4fdf-a90a-2d6be88c23e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f603aa4ef9fa0a10f90946e243cd0d25dcd6dba58c86a4378085addf26d95233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6q9l7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lw4fx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:22Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.976543 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.976583 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.976592 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.976608 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:22 crc kubenswrapper[4769]: I0131 16:30:22.976617 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:22Z","lastTransitionTime":"2026-01-31T16:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:23 crc kubenswrapper[4769]: I0131 16:30:23.079515 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:23 crc kubenswrapper[4769]: I0131 16:30:23.079563 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:23 crc kubenswrapper[4769]: I0131 16:30:23.079572 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:23 crc kubenswrapper[4769]: I0131 16:30:23.079588 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:23 crc kubenswrapper[4769]: I0131 16:30:23.079600 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:23Z","lastTransitionTime":"2026-01-31T16:30:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:23 crc kubenswrapper[4769]: I0131 16:30:23.185222 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:23 crc kubenswrapper[4769]: I0131 16:30:23.185286 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:23 crc kubenswrapper[4769]: I0131 16:30:23.185302 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:23 crc kubenswrapper[4769]: I0131 16:30:23.185327 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:23 crc kubenswrapper[4769]: I0131 16:30:23.185344 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:23Z","lastTransitionTime":"2026-01-31T16:30:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:23 crc kubenswrapper[4769]: I0131 16:30:23.288690 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:23 crc kubenswrapper[4769]: I0131 16:30:23.288751 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:23 crc kubenswrapper[4769]: I0131 16:30:23.288778 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:23 crc kubenswrapper[4769]: I0131 16:30:23.288808 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:23 crc kubenswrapper[4769]: I0131 16:30:23.288832 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:23Z","lastTransitionTime":"2026-01-31T16:30:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:23 crc kubenswrapper[4769]: I0131 16:30:23.391463 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:23 crc kubenswrapper[4769]: I0131 16:30:23.391521 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:23 crc kubenswrapper[4769]: I0131 16:30:23.391529 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:23 crc kubenswrapper[4769]: I0131 16:30:23.391544 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:23 crc kubenswrapper[4769]: I0131 16:30:23.391553 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:23Z","lastTransitionTime":"2026-01-31T16:30:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:23 crc kubenswrapper[4769]: I0131 16:30:23.493820 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:23 crc kubenswrapper[4769]: I0131 16:30:23.493849 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:23 crc kubenswrapper[4769]: I0131 16:30:23.493857 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:23 crc kubenswrapper[4769]: I0131 16:30:23.493870 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:23 crc kubenswrapper[4769]: I0131 16:30:23.493878 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:23Z","lastTransitionTime":"2026-01-31T16:30:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:23 crc kubenswrapper[4769]: I0131 16:30:23.596843 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:23 crc kubenswrapper[4769]: I0131 16:30:23.596878 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:23 crc kubenswrapper[4769]: I0131 16:30:23.596886 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:23 crc kubenswrapper[4769]: I0131 16:30:23.596900 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:23 crc kubenswrapper[4769]: I0131 16:30:23.596909 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:23Z","lastTransitionTime":"2026-01-31T16:30:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:23 crc kubenswrapper[4769]: I0131 16:30:23.699597 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:23 crc kubenswrapper[4769]: I0131 16:30:23.699642 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:23 crc kubenswrapper[4769]: I0131 16:30:23.699650 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:23 crc kubenswrapper[4769]: I0131 16:30:23.699667 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:23 crc kubenswrapper[4769]: I0131 16:30:23.699676 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:23Z","lastTransitionTime":"2026-01-31T16:30:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:23 crc kubenswrapper[4769]: I0131 16:30:23.707277 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:30:23 crc kubenswrapper[4769]: E0131 16:30:23.707406 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:30:23 crc kubenswrapper[4769]: I0131 16:30:23.714665 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-22 11:22:32.044086946 +0000 UTC Jan 31 16:30:23 crc kubenswrapper[4769]: I0131 16:30:23.802133 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:23 crc kubenswrapper[4769]: I0131 16:30:23.802176 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:23 crc kubenswrapper[4769]: I0131 16:30:23.802187 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:23 crc kubenswrapper[4769]: I0131 16:30:23.802204 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:23 crc kubenswrapper[4769]: I0131 16:30:23.802216 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:23Z","lastTransitionTime":"2026-01-31T16:30:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:23 crc kubenswrapper[4769]: I0131 16:30:23.905419 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:23 crc kubenswrapper[4769]: I0131 16:30:23.905478 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:23 crc kubenswrapper[4769]: I0131 16:30:23.905521 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:23 crc kubenswrapper[4769]: I0131 16:30:23.905546 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:23 crc kubenswrapper[4769]: I0131 16:30:23.905563 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:23Z","lastTransitionTime":"2026-01-31T16:30:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.007801 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.007836 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.007845 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.007858 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.007867 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:24Z","lastTransitionTime":"2026-01-31T16:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.110456 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.110514 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.110526 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.110540 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.110549 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:24Z","lastTransitionTime":"2026-01-31T16:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.213468 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.213522 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.213531 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.213546 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.213556 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:24Z","lastTransitionTime":"2026-01-31T16:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.316656 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.316732 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.316750 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.317239 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.317302 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:24Z","lastTransitionTime":"2026-01-31T16:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.420557 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.420630 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.420651 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.420676 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.420694 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:24Z","lastTransitionTime":"2026-01-31T16:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.522626 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.522679 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.522692 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.522714 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.522740 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:24Z","lastTransitionTime":"2026-01-31T16:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.625756 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.625821 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.625862 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.625892 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.625910 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:24Z","lastTransitionTime":"2026-01-31T16:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.707708 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.707764 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:30:24 crc kubenswrapper[4769]: E0131 16:30:24.707932 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.708154 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:30:24 crc kubenswrapper[4769]: E0131 16:30:24.708905 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:30:24 crc kubenswrapper[4769]: E0131 16:30:24.708730 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.714859 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-08 10:34:48.844309374 +0000 UTC Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.728054 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.728099 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.728116 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.728136 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.728153 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:24Z","lastTransitionTime":"2026-01-31T16:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.831007 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.831080 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.831105 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.831130 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.831150 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:24Z","lastTransitionTime":"2026-01-31T16:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.934623 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.934720 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.934741 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.934766 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:24 crc kubenswrapper[4769]: I0131 16:30:24.934784 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:24Z","lastTransitionTime":"2026-01-31T16:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.038154 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.038223 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.038240 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.038264 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.038282 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:25Z","lastTransitionTime":"2026-01-31T16:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.140883 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.140916 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.140924 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.140939 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.140949 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:25Z","lastTransitionTime":"2026-01-31T16:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.243530 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.243566 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.243579 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.243594 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.243603 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:25Z","lastTransitionTime":"2026-01-31T16:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.346234 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.346267 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.346278 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.346295 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.346307 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:25Z","lastTransitionTime":"2026-01-31T16:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.448583 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.448624 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.448633 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.448649 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.448658 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:25Z","lastTransitionTime":"2026-01-31T16:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.550461 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.550551 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.550574 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.550605 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.550630 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:25Z","lastTransitionTime":"2026-01-31T16:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.652879 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.652916 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.652926 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.652940 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.652951 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:25Z","lastTransitionTime":"2026-01-31T16:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.707209 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:30:25 crc kubenswrapper[4769]: E0131 16:30:25.707324 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.715096 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-15 05:01:58.551944966 +0000 UTC Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.754801 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.754843 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.754852 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.754867 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.754876 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:25Z","lastTransitionTime":"2026-01-31T16:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.858196 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.858256 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.858268 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.858288 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.858302 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:25Z","lastTransitionTime":"2026-01-31T16:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.961867 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.961939 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.961952 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.961971 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:25 crc kubenswrapper[4769]: I0131 16:30:25.961988 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:25Z","lastTransitionTime":"2026-01-31T16:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.064953 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.065014 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.065029 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.065053 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.065068 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:26Z","lastTransitionTime":"2026-01-31T16:30:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.167805 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.167858 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.167872 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.167891 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.167905 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:26Z","lastTransitionTime":"2026-01-31T16:30:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.270906 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.270968 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.270986 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.271014 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.271031 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:26Z","lastTransitionTime":"2026-01-31T16:30:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.378129 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.378302 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.378480 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.379299 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.379345 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:26Z","lastTransitionTime":"2026-01-31T16:30:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.481846 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.481891 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.481910 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.481934 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.481952 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:26Z","lastTransitionTime":"2026-01-31T16:30:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.585151 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.585205 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.585226 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.585268 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.585286 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:26Z","lastTransitionTime":"2026-01-31T16:30:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.688586 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.688654 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.688674 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.688702 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.688726 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:26Z","lastTransitionTime":"2026-01-31T16:30:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.708235 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.708366 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:30:26 crc kubenswrapper[4769]: E0131 16:30:26.708443 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.708490 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:30:26 crc kubenswrapper[4769]: E0131 16:30:26.708706 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:30:26 crc kubenswrapper[4769]: E0131 16:30:26.708926 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.715302 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-10 14:07:00.961734317 +0000 UTC Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.792321 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.792366 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.792378 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.792402 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.792416 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:26Z","lastTransitionTime":"2026-01-31T16:30:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.894949 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.895011 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.895035 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.895064 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.895088 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:26Z","lastTransitionTime":"2026-01-31T16:30:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.998126 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.998209 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.998233 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.998268 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:26 crc kubenswrapper[4769]: I0131 16:30:26.998290 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:26Z","lastTransitionTime":"2026-01-31T16:30:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:27 crc kubenswrapper[4769]: I0131 16:30:27.101183 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:27 crc kubenswrapper[4769]: I0131 16:30:27.101214 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:27 crc kubenswrapper[4769]: I0131 16:30:27.101226 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:27 crc kubenswrapper[4769]: I0131 16:30:27.101243 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:27 crc kubenswrapper[4769]: I0131 16:30:27.101259 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:27Z","lastTransitionTime":"2026-01-31T16:30:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:27 crc kubenswrapper[4769]: I0131 16:30:27.204475 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:27 crc kubenswrapper[4769]: I0131 16:30:27.204548 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:27 crc kubenswrapper[4769]: I0131 16:30:27.204568 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:27 crc kubenswrapper[4769]: I0131 16:30:27.204592 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:27 crc kubenswrapper[4769]: I0131 16:30:27.204611 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:27Z","lastTransitionTime":"2026-01-31T16:30:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:27 crc kubenswrapper[4769]: I0131 16:30:27.308557 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:27 crc kubenswrapper[4769]: I0131 16:30:27.308700 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:27 crc kubenswrapper[4769]: I0131 16:30:27.308721 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:27 crc kubenswrapper[4769]: I0131 16:30:27.308750 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:27 crc kubenswrapper[4769]: I0131 16:30:27.308774 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:27Z","lastTransitionTime":"2026-01-31T16:30:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:27 crc kubenswrapper[4769]: I0131 16:30:27.411458 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:27 crc kubenswrapper[4769]: I0131 16:30:27.411569 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:27 crc kubenswrapper[4769]: I0131 16:30:27.411589 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:27 crc kubenswrapper[4769]: I0131 16:30:27.411615 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:27 crc kubenswrapper[4769]: I0131 16:30:27.411635 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:27Z","lastTransitionTime":"2026-01-31T16:30:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:27 crc kubenswrapper[4769]: I0131 16:30:27.514188 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:27 crc kubenswrapper[4769]: I0131 16:30:27.514282 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:27 crc kubenswrapper[4769]: I0131 16:30:27.514302 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:27 crc kubenswrapper[4769]: I0131 16:30:27.514335 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:27 crc kubenswrapper[4769]: I0131 16:30:27.514358 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:27Z","lastTransitionTime":"2026-01-31T16:30:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:27 crc kubenswrapper[4769]: I0131 16:30:27.616835 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:27 crc kubenswrapper[4769]: I0131 16:30:27.616914 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:27 crc kubenswrapper[4769]: I0131 16:30:27.616934 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:27 crc kubenswrapper[4769]: I0131 16:30:27.616964 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:27 crc kubenswrapper[4769]: I0131 16:30:27.616982 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:27Z","lastTransitionTime":"2026-01-31T16:30:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:27 crc kubenswrapper[4769]: I0131 16:30:27.708163 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:30:27 crc kubenswrapper[4769]: E0131 16:30:27.708361 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:30:27 crc kubenswrapper[4769]: I0131 16:30:27.716430 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-22 04:59:34.407651776 +0000 UTC Jan 31 16:30:27 crc kubenswrapper[4769]: I0131 16:30:27.719642 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:27 crc kubenswrapper[4769]: I0131 16:30:27.719709 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:27 crc kubenswrapper[4769]: I0131 16:30:27.719722 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:27 crc kubenswrapper[4769]: I0131 16:30:27.719739 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:27 crc kubenswrapper[4769]: I0131 16:30:27.719752 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:27Z","lastTransitionTime":"2026-01-31T16:30:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:27 crc kubenswrapper[4769]: I0131 16:30:27.822696 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:27 crc kubenswrapper[4769]: I0131 16:30:27.822747 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:27 crc kubenswrapper[4769]: I0131 16:30:27.822761 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:27 crc kubenswrapper[4769]: I0131 16:30:27.822778 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:27 crc kubenswrapper[4769]: I0131 16:30:27.822792 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:27Z","lastTransitionTime":"2026-01-31T16:30:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:27 crc kubenswrapper[4769]: I0131 16:30:27.926921 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:27 crc kubenswrapper[4769]: I0131 16:30:27.927004 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:27 crc kubenswrapper[4769]: I0131 16:30:27.927025 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:27 crc kubenswrapper[4769]: I0131 16:30:27.927060 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:27 crc kubenswrapper[4769]: I0131 16:30:27.927081 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:27Z","lastTransitionTime":"2026-01-31T16:30:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.029835 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.029921 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.029945 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.029978 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.030007 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:28Z","lastTransitionTime":"2026-01-31T16:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.132967 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.133030 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.133047 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.133076 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.133093 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:28Z","lastTransitionTime":"2026-01-31T16:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.235346 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.235410 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.235429 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.235454 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.235472 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:28Z","lastTransitionTime":"2026-01-31T16:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.338214 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.338276 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.338294 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.338320 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.338343 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:28Z","lastTransitionTime":"2026-01-31T16:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.441717 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.441784 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.441802 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.441830 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.441848 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:28Z","lastTransitionTime":"2026-01-31T16:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.544531 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.544585 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.544602 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.544624 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.544641 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:28Z","lastTransitionTime":"2026-01-31T16:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.647833 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.647899 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.647918 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.647943 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.647958 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:28Z","lastTransitionTime":"2026-01-31T16:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.707234 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.707249 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.707299 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:30:28 crc kubenswrapper[4769]: E0131 16:30:28.708001 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:30:28 crc kubenswrapper[4769]: E0131 16:30:28.708238 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:30:28 crc kubenswrapper[4769]: E0131 16:30:28.708296 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.708653 4769 scope.go:117] "RemoveContainer" containerID="4e0676e321ded7f968141e8b28596ec889f690e6b66a6b10151501f9f83a7372" Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.716677 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-16 03:35:01.83865879 +0000 UTC Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.760146 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.760263 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.760290 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.760322 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.760346 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:28Z","lastTransitionTime":"2026-01-31T16:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.863116 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.863154 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.863165 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.863183 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.863194 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:28Z","lastTransitionTime":"2026-01-31T16:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.965679 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.965912 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.965921 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.965933 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:28 crc kubenswrapper[4769]: I0131 16:30:28.965944 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:28Z","lastTransitionTime":"2026-01-31T16:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.068135 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.068191 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.068204 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.068223 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.068239 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:29Z","lastTransitionTime":"2026-01-31T16:30:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.166802 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2r9tc_86f2019b-d6ca-4e73-9dac-52fe746489cb/ovnkube-controller/2.log" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.170138 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" event={"ID":"86f2019b-d6ca-4e73-9dac-52fe746489cb","Type":"ContainerStarted","Data":"a2f542bc61e702fd04e7f702af083e41309d9ba14e7edaf90b0e34a9b1ab7b53"} Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.170149 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.170214 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.170228 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.170270 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.170285 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:29Z","lastTransitionTime":"2026-01-31T16:30:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.170486 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.191823 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86f2019b-d6ca-4e73-9dac-52fe746489cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2f542bc61e702fd04e7f702af083e41309d9ba1
4e7edaf90b0e34a9b1ab7b53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e0676e321ded7f968141e8b28596ec889f690e6b66a6b10151501f9f83a7372\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-31T16:29:58Z\\\",\\\"message\\\":\\\"try object setup: *v1.Pod openshift-multus/multus-additional-cni-plugins-rftqz\\\\nI0131 16:29:58.731451 6426 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-node-2r9tc in node crc\\\\nI0131 16:29:58.731460 6426 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/multus-additional-cni-plugins-rftqz\\\\nI0131 16:29:58.731476 6426 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-additional-cni-plugins-rftqz in node crc\\\\nI0131 16:29:58.731468 6426 obj_retry.go:386] Retry successful for *v1.Pod openshift-ovn-kubernetes/ovnkube-node-2r9tc after 0 failed attempt(s)\\\\nI0131 16:29:58.731486 6426 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-additional-cni-plugins-rftqz after 0 failed attempt(s)\\\\nI0131 16:29:58.731490 6426 default_network_controller.go:776] Recording success event on pod openshift-ovn-kubernetes/ovnkube-node-2r9tc\\\\nF0131 16:29:58.731517 6426 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: 
fail\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:57Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"c
ontainerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2r9tc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:29Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.210830 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bl9cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"428b0729-22d7-4feb-a392-1ec77e5acbc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzcbr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzcbr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:47Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bl9cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:29Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.232476 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:29Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.245345 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f9e971a-93ce-4a49-a970-a2789486d12c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f591a190eaf4b09cdb5961545cba8b3e469d2dd5b6b729627af238d6d8ebb410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rftqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:29Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.255813 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d352f75-43f7-4b8c-867e-cfb17bbbe011\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e57f0adc59ebcc50644648ee64395f6834328fec384b3d8c83b9758314ea18d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48035545fd929a672be1a83a941b13f4b352bdb858af6a412efec46dc7ac217\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-4bqbm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:29Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.266454 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:29Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.272007 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.272032 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.272042 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.272056 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.272067 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:29Z","lastTransitionTime":"2026-01-31T16:30:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.276913 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef20199c8b7874b9d34bc38aed75d3d0dc669db3dfaff3218a1a4e249330daca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:29Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.287517 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lw4fx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"97c97bcd-bd44-4fdf-a90a-2d6be88c23e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f603aa4ef9fa0a10f90946e243cd0d25dcd6dba58c86a4378085addf26d95233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6q9l7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lw4fx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:29Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.301372 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"21fa2db2-f448-487d-9ddb-ba4da28e8ffa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0131 16:29:26.255718 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0131 16:29:26.257930 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3522701725/tls.crt::/tmp/serving-cert-3522701725/tls.key\\\\\\\"\\\\nI0131 16:29:32.222719 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0131 16:29:32.227365 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0131 16:29:32.227391 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0131 16:29:32.227414 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0131 16:29:32.227419 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0131 16:29:32.234321 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0131 16:29:32.234340 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234349 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0131 16:29:32.234352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0131 16:29:32.234355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0131 16:29:32.234357 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0131 16:29:32.234523 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0131 16:29:32.237094 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:29Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.311647 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"259af7b3-780a-4464-9596-a063fecea409\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc4c0f2131eeb3ea03f5572e41e6c9f86ef15f5d6b70880be88ae6219eadecab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7027bbe350b6afd5a1563f64215b882ff809f1474dcce7a15a843756b7595233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a05b68fb33a04a94268458bbaa7f891e2d493dbe5a27d7e12ced0d561a70e938\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7cff45082b65da86bcca88d363692fdcaffbae44f33b5ae53a09b5aea7eff212\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7cff45082b65da86bcca88d363692fdcaffbae44f33b5ae53a09b5aea7eff212\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:29Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.325026 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79dca88fae791047a9091e1fcad57d33d0061bbb52395ed325e2658e5fd6ffc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:29Z is after 
2025-08-24T17:21:41Z" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.334644 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-slrbh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"48d46c05-78b8-4355-9027-77efbbfbe87c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c68712921d6f140f1028a42cadcda30d38e3a9772ad3ca53e6d7b3b039433b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hqcpx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-slrbh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:29Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.347701 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:29Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.359965 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e5e34357e35a0bda20351c442465d7866c927dfad1a6c8acf1dc32b52a326ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb37e108557965f63da49a443c6667212f8ab8eb0a7099a0a106c45e00e78e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:29Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.370710 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kvc58" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e39e875a-bea7-4e27-af9a-f769a493efe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c9c0d071a489b7537159a33cfb524a36c03fb2b7dbb0bff1c4d966c26aa4a503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6shzp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a326f3c1d78ff42444724a0595281be656bfe3f92ff5716a089e7a6c6828d34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6shzp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kvc58\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:29Z is after 2025-08-24T17:21:41Z" Jan 31 
16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.373980 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.374011 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.374020 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.374034 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.374043 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:29Z","lastTransitionTime":"2026-01-31T16:30:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.383571 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb8dd1c1-8ad0-4df1-9eb7-f7e36509abac\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8f8d35bedc1d437cf45b14a416b9e5ed1610d7ecd30603f9a179406839220a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9c6bf8a15857982b68a045b8ca1d407a63ad3da192375596f26389d9983cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"rea
dy\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1712a54b482696918a3ddb7294c16ce3676cb56c2928b721be12bcd390085e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f22b718ef3d2b16061b2c0fe48c8fe612b3e2b63baa2925fce7a1ad9552b090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:29Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.397084 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g5kbw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a7cfe09-9892-494d-a420-5d720afb3df3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb4ceb742b812d9b282ba14b266b4a78550b5dc38d7637c07d1c95256799bc40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e1fb743faaf9717b716d96f7b36096ee11da25ac3eeeb601bdc6d8f20faf3a3f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-31T16:30:20Z\\\",\\\"message\\\":\\\"2026-01-31T16:29:35+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_7c3980f7-b7ed-4a09-9300-a99efa85ce97\\\\n2026-01-31T16:29:35+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_7c3980f7-b7ed-4a09-9300-a99efa85ce97 to /host/opt/cni/bin/\\\\n2026-01-31T16:29:35Z [verbose] multus-daemon started\\\\n2026-01-31T16:29:35Z [verbose] Readiness Indicator file check\\\\n2026-01-31T16:30:20Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m86wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g5kbw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:29Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.476559 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.476617 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.476640 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.476665 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.476683 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:29Z","lastTransitionTime":"2026-01-31T16:30:29Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.578820 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.578851 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.578859 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.578873 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.578881 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:29Z","lastTransitionTime":"2026-01-31T16:30:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.681023 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.681077 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.681088 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.681105 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.681119 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:29Z","lastTransitionTime":"2026-01-31T16:30:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.707290 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:30:29 crc kubenswrapper[4769]: E0131 16:30:29.707425 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.717535 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-12 16:31:36.95323238 +0000 UTC Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.783350 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.783406 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.783424 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.783445 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.783462 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:29Z","lastTransitionTime":"2026-01-31T16:30:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.885684 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.885714 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.885724 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.885737 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.885746 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:29Z","lastTransitionTime":"2026-01-31T16:30:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.986958 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.987003 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.987016 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.987034 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:29 crc kubenswrapper[4769]: I0131 16:30:29.987050 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:29Z","lastTransitionTime":"2026-01-31T16:30:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.026842 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.026872 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.026882 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.026894 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.026902 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:30Z","lastTransitionTime":"2026-01-31T16:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:30 crc kubenswrapper[4769]: E0131 16:30:30.045135 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a4704f7-ede0-4833-ba79-415de5d798cc\\\",\\\"systemUUID\\\":\\\"e3275d1e-5ae6-4e54-b0fa-71e35cbe4ac0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:30Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.048211 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.048314 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.048397 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.048477 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.048564 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:30Z","lastTransitionTime":"2026-01-31T16:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:30 crc kubenswrapper[4769]: E0131 16:30:30.059535 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a4704f7-ede0-4833-ba79-415de5d798cc\\\",\\\"systemUUID\\\":\\\"e3275d1e-5ae6-4e54-b0fa-71e35cbe4ac0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:30Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.062203 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.062225 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.062232 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.062244 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.062252 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:30Z","lastTransitionTime":"2026-01-31T16:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:30 crc kubenswrapper[4769]: E0131 16:30:30.073672 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a4704f7-ede0-4833-ba79-415de5d798cc\\\",\\\"systemUUID\\\":\\\"e3275d1e-5ae6-4e54-b0fa-71e35cbe4ac0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:30Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.076680 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.076737 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.076750 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.076767 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.076778 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:30Z","lastTransitionTime":"2026-01-31T16:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:30 crc kubenswrapper[4769]: E0131 16:30:30.087405 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a4704f7-ede0-4833-ba79-415de5d798cc\\\",\\\"systemUUID\\\":\\\"e3275d1e-5ae6-4e54-b0fa-71e35cbe4ac0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:30Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.090301 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.090335 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.090349 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.090361 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.090370 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:30Z","lastTransitionTime":"2026-01-31T16:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:30 crc kubenswrapper[4769]: E0131 16:30:30.100902 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a4704f7-ede0-4833-ba79-415de5d798cc\\\",\\\"systemUUID\\\":\\\"e3275d1e-5ae6-4e54-b0fa-71e35cbe4ac0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:30Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:30 crc kubenswrapper[4769]: E0131 16:30:30.101011 4769 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.102099 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.102134 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.102146 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.102162 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.102173 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:30Z","lastTransitionTime":"2026-01-31T16:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.174310 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2r9tc_86f2019b-d6ca-4e73-9dac-52fe746489cb/ovnkube-controller/3.log" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.174819 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2r9tc_86f2019b-d6ca-4e73-9dac-52fe746489cb/ovnkube-controller/2.log" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.176812 4769 generic.go:334] "Generic (PLEG): container finished" podID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerID="a2f542bc61e702fd04e7f702af083e41309d9ba14e7edaf90b0e34a9b1ab7b53" exitCode=1 Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.176852 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" event={"ID":"86f2019b-d6ca-4e73-9dac-52fe746489cb","Type":"ContainerDied","Data":"a2f542bc61e702fd04e7f702af083e41309d9ba14e7edaf90b0e34a9b1ab7b53"} Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.176987 4769 scope.go:117] "RemoveContainer" containerID="4e0676e321ded7f968141e8b28596ec889f690e6b66a6b10151501f9f83a7372" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.177357 4769 scope.go:117] "RemoveContainer" containerID="a2f542bc61e702fd04e7f702af083e41309d9ba14e7edaf90b0e34a9b1ab7b53" Jan 31 16:30:30 crc kubenswrapper[4769]: E0131 16:30:30.177545 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-2r9tc_openshift-ovn-kubernetes(86f2019b-d6ca-4e73-9dac-52fe746489cb)\"" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.192776 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-slrbh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"48d46c05-78b8-4355-9027-77efbbfbe87c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c68712921d6f140f1028a42cadcda30d38e3a9772ad3ca53e6d7b3b039433b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hqcpx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-slrbh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:30Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.204655 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:30Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.206944 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.206988 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.207005 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.207029 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.207047 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:30Z","lastTransitionTime":"2026-01-31T16:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.217122 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e5e34357e35a0bda20351c442465d7866c927dfad1a6c8acf1dc32b52a326ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb37e108557965f63da49a443c6667212f8ab8eb0a7099a0a106c45e00e78e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:30Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.226713 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kvc58" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e39e875a-bea7-4e27-af9a-f769a493efe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c9c0d071a489b7537159a33cfb524a36c03fb2b7dbb0bff1c4d966c26aa4a503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6shzp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a326f3c1d78ff42444724a0595281be656bfe3f92ff5716a089e7a6c6828d34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6shzp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kvc58\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:30Z is after 2025-08-24T17:21:41Z" Jan 31 
16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.237201 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb8dd1c1-8ad0-4df1-9eb7-f7e36509abac\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8f8d35bedc1d437cf45b14a416b9e5ed1610d7ecd30603f9a179406839220a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9c6bf8a15857982b68a045b8ca1d407a63ad3da192375596f26389d9983cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1712a54b482696918a3ddb7294c16ce3676cb56c2928b721be12bcd390085e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f22b718ef3d2b16061b2c0fe48c8fe612b3e2b63baa2925fce7a1ad9552b090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:30Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.247483 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g5kbw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a7cfe09-9892-494d-a420-5d720afb3df3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb4ceb742b812d9b282ba14b266b4a78550b5dc38d7637c07d1c95256799bc40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e1fb743faaf9717b716d96f7b36096ee11da25ac3eeeb601bdc6d8f20faf3a3f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-31T16:30:20Z\\\",\\\"message\\\":\\\"2026-01-31T16:29:35+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_7c3980f7-b7ed-4a09-9300-a99efa85ce97\\\\n2026-01-31T16:29:35+00:00 [cnibincopy] 
Successfully moved files in /host/opt/cni/bin/upgrade_7c3980f7-b7ed-4a09-9300-a99efa85ce97 to /host/opt/cni/bin/\\\\n2026-01-31T16:29:35Z [verbose] multus-daemon started\\\\n2026-01-31T16:29:35Z [verbose] Readiness Indicator file check\\\\n2026-01-31T16:30:20Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m86wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g5kbw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:30Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.263805 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"86f2019b-d6ca-4e73-9dac-52fe746489cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2f542bc61e702fd04e7f702af083e41309d9ba14e7edaf90b0e34a9b1ab7b53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e0676e321ded7f968141e8b28596ec889f690e6b66a6b10151501f9f83a7372\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-31T16:29:58Z\\\",\\\"message\\\":\\\"try object setup: *v1.Pod openshift-multus/multus-additional-cni-plugins-rftqz\\\\nI0131 16:29:58.731451 6426 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-node-2r9tc in node crc\\\\nI0131 16:29:58.731460 6426 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/multus-additional-cni-plugins-rftqz\\\\nI0131 16:29:58.731476 6426 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-additional-cni-plugins-rftqz in node crc\\\\nI0131 16:29:58.731468 6426 obj_retry.go:386] Retry successful for *v1.Pod openshift-ovn-kubernetes/ovnkube-node-2r9tc after 0 failed attempt(s)\\\\nI0131 16:29:58.731486 6426 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-additional-cni-plugins-rftqz after 0 failed attempt(s)\\\\nI0131 16:29:58.731490 6426 default_network_controller.go:776] Recording success event on pod openshift-ovn-kubernetes/ovnkube-node-2r9tc\\\\nF0131 16:29:58.731517 6426 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: fail\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:57Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2f542bc61e702fd04e7f702af083e41309d9ba14e7edaf90b0e34a9b1ab7b53\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-31T16:30:29Z\\\",\\\"message\\\":\\\"7.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:29Z is after 
2025-08-24T17:21:41Z]\\\\nI0131 16:30:29.569813 6816 services_controller.go:452] Built service openshift-console/console per-node LB for network=default: []services.LB{}\\\\nI0131 16:30:29.570939 6816 transact.go:42] Configuring OVN: [{Op:update Table:Logical_Switch_Port Row:map[addresses:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]} options:{GoMap:map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f\\\",\\\"image\\\
":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2r9tc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:30Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.273338 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bl9cd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"428b0729-22d7-4feb-a392-1ec77e5acbc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzcbr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzcbr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:47Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bl9cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:30Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.283727 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:30Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.296396 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f9e971a-93ce-4a49-a970-a2789486d12c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f591a190eaf4b09cdb5961545cba8b3e469d2dd5b6b729627af238d6d8ebb410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rftqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:30Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.306785 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d352f75-43f7-4b8c-867e-cfb17bbbe011\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e57f0adc59ebcc50644648ee64395f6834328fec384b3d8c83b9758314ea18d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48035545fd929a672be1a83a941b13f4b352bdb858af6a412efec46dc7ac217\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-4bqbm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:30Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.309408 4769 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.309443 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.309456 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.309474 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.309489 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:30Z","lastTransitionTime":"2026-01-31T16:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.319200 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:30Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.331449 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef20199c8b7874b9d34bc38aed75d3d0dc669db3dfaff3218a1a4e249330daca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:30Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.340597 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lw4fx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"97c97bcd-bd44-4fdf-a90a-2d6be88c23e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f603aa4ef9fa0a10f90946e243cd0d25dcd6dba58c86a4378085addf26d95233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6q9l7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lw4fx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:30Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.353553 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"21fa2db2-f448-487d-9ddb-ba4da28e8ffa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0131 16:29:26.255718 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0131 16:29:26.257930 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3522701725/tls.crt::/tmp/serving-cert-3522701725/tls.key\\\\\\\"\\\\nI0131 16:29:32.222719 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0131 16:29:32.227365 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0131 16:29:32.227391 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0131 16:29:32.227414 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0131 16:29:32.227419 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0131 16:29:32.234321 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0131 16:29:32.234340 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234349 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0131 16:29:32.234352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0131 16:29:32.234355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0131 16:29:32.234357 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0131 16:29:32.234523 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0131 16:29:32.237094 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:30Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.363775 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"259af7b3-780a-4464-9596-a063fecea409\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc4c0f2131eeb3ea03f5572e41e6c9f86ef15f5d6b70880be88ae6219eadecab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7027bbe350b6afd5a1563f64215b882ff809f1474dcce7a15a843756b7595233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a05b68fb33a04a94268458bbaa7f891e2d493dbe5a27d7e12ced0d561a70e938\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7cff45082b65da86bcca88d363692fdcaffbae44f33b5ae53a09b5aea7eff212\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7cff45082b65da86bcca88d363692fdcaffbae44f33b5ae53a09b5aea7eff212\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:30Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.374261 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79dca88fae791047a9091e1fcad57d33d0061bbb52395ed325e2658e5fd6ffc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:30Z is after 
2025-08-24T17:21:41Z" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.411675 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.411712 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.411722 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.411734 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.411743 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:30Z","lastTransitionTime":"2026-01-31T16:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.514870 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.515173 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.515315 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.515454 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.515695 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:30Z","lastTransitionTime":"2026-01-31T16:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.617775 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.617827 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.617843 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.617867 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.617887 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:30Z","lastTransitionTime":"2026-01-31T16:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.707709 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.707745 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.707819 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:30:30 crc kubenswrapper[4769]: E0131 16:30:30.707864 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:30:30 crc kubenswrapper[4769]: E0131 16:30:30.707977 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:30:30 crc kubenswrapper[4769]: E0131 16:30:30.708056 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.717920 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-18 16:06:54.393103499 +0000 UTC Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.720593 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.720670 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.720691 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.720715 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.720742 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:30Z","lastTransitionTime":"2026-01-31T16:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.822685 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.822721 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.822731 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.822746 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.822755 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:30Z","lastTransitionTime":"2026-01-31T16:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.924470 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.924570 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.924584 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.924603 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:30 crc kubenswrapper[4769]: I0131 16:30:30.924614 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:30Z","lastTransitionTime":"2026-01-31T16:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.027211 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.027277 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.027296 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.027318 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.027334 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:31Z","lastTransitionTime":"2026-01-31T16:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.130327 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.130391 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.130408 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.130432 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.130450 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:31Z","lastTransitionTime":"2026-01-31T16:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.181948 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2r9tc_86f2019b-d6ca-4e73-9dac-52fe746489cb/ovnkube-controller/3.log" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.189552 4769 scope.go:117] "RemoveContainer" containerID="a2f542bc61e702fd04e7f702af083e41309d9ba14e7edaf90b0e34a9b1ab7b53" Jan 31 16:30:31 crc kubenswrapper[4769]: E0131 16:30:31.189893 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-2r9tc_openshift-ovn-kubernetes(86f2019b-d6ca-4e73-9dac-52fe746489cb)\"" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.208037 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:31Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.224953 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f9e971a-93ce-4a49-a970-a2789486d12c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f591a190eaf4b09cdb5961545cba8b3e469d2dd5b6b729627af238d6d8ebb410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rftqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:31Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.233521 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.233570 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.233642 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.233678 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.233696 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:31Z","lastTransitionTime":"2026-01-31T16:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.243797 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d352f75-43f7-4b8c-867e-cfb17bbbe011\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e57f0adc59ebcc50644648ee64395f6834328fec384b3d8c83b9758314ea18d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48035545fd929a672be1a83a941b13f4b352bdb858af6a412efec46dc7ac217\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-4bqbm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:31Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.273742 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86f2019b-d6ca-4e73-9dac-52fe746489cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuber
netes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67\\\",\\\"image
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2f542bc61e702fd04e7f702af083e41309d9ba14e7edaf90b0e34a9b1ab7b53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2f542bc61e702fd04e7f702af083e41309d9ba14e7edaf90b0e34a9b1ab7b53\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-31T16:30:29Z\\\",\\\"message\\\":\\\"7.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:29Z is after 2025-08-24T17:21:41Z]\\\\nI0131 16:30:29.569813 6816 services_controller.go:452] Built service openshift-console/console per-node LB for network=default: []services.LB{}\\\\nI0131 16:30:29.570939 6816 transact.go:42] Configuring OVN: [{Op:update Table:Logical_Switch_Port Row:map[addresses:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]} options:{GoMap:map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == 
{c02bd945-d57b-49ff-9cd3-202ed3574b\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:30:28Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-2r9tc_openshift-ovn-kubernetes(86f2019b-d6ca-4e73-9dac-52fe746489cb)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\
"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2r9tc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:31Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.290149 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bl9cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"428b0729-22d7-4feb-a392-1ec77e5acbc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzcbr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzcbr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:47Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bl9cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:31Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.310765 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"21fa2db2-f448-487d-9ddb-ba4da28e8ffa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0131 16:29:26.255718 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0131 16:29:26.257930 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3522701725/tls.crt::/tmp/serving-cert-3522701725/tls.key\\\\\\\"\\\\nI0131 16:29:32.222719 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0131 16:29:32.227365 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0131 16:29:32.227391 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0131 16:29:32.227414 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0131 16:29:32.227419 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0131 16:29:32.234321 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0131 16:29:32.234340 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234349 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0131 16:29:32.234352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0131 16:29:32.234355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0131 16:29:32.234357 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0131 16:29:32.234523 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0131 16:29:32.237094 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:31Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.329547 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"259af7b3-780a-4464-9596-a063fecea409\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc4c0f2131eeb3ea03f5572e41e6c9f86ef15f5d6b70880be88ae6219eadecab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7027bbe350b6afd5a1563f64215b882ff809f1474dcce7a15a843756b7595233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a05b68fb33a04a94268458bbaa7f891e2d493dbe5a27d7e12ced0d561a70e938\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7cff45082b65da86bcca88d363692fdcaffbae44f33b5ae53a09b5aea7eff212\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7cff45082b65da86bcca88d363692fdcaffbae44f33b5ae53a09b5aea7eff212\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:31Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.336739 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.336791 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.336814 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.336843 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.336865 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:31Z","lastTransitionTime":"2026-01-31T16:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.349589 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79dca88fae791047a9091e1fcad57d33d0061bbb52395ed325e2658e5fd6ffc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:31Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.369711 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:31Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.387630 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef20199c8b7874b9d34bc38aed75d3d0dc669db3dfaff3218a1a4e249330daca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:31Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.403424 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lw4fx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"97c97bcd-bd44-4fdf-a90a-2d6be88c23e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f603aa4ef9fa0a10f90946e243cd0d25dcd6dba58c86a4378085addf26d95233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6q9l7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lw4fx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:31Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.422738 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:31Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.439952 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.440001 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.440016 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.440038 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.440049 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:31Z","lastTransitionTime":"2026-01-31T16:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.442733 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e5e34357e35a0bda20351c442465d7866c927dfad1a6c8acf1dc32b52a326ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb37e108557965f63da49a443c6667212f8ab8eb0a7099a0a106c45e00e78e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:31Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.461569 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-slrbh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"48d46c05-78b8-4355-9027-77efbbfbe87c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c68712921d6f140f1028a42cadcda30d38e3a9772ad3ca53e6d7b3b039433b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hqcpx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-slrbh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:31Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.481470 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb8dd1c1-8ad0-4df1-9eb7-f7e36509abac\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8f8d35bedc1d437cf45b14a416b9e5ed1610d7ecd30603f9a179406839220a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9c6bf8a15857982b68a045b8ca1d407a63ad3da192375596f26389d9983cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1712a54b482696918a3ddb7294c16ce3676cb56c2928b721be12bcd390085e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f22b718ef3d2b16061b2c0fe48c8fe612b3e2b63baa2925fce7a1ad9552b090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:31Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.501639 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g5kbw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a7cfe09-9892-494d-a420-5d720afb3df3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb4ceb742b812d9b282ba14b266b4a78550b5dc38d7637c07d1c95256799bc40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e1fb743faaf9717b716d96f7b36096ee11da25ac3eeeb601bdc6d8f20faf3a3f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-31T16:30:20Z\\\",\\\"message\\\":\\\"2026-01-31T16:29:35+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_7c3980f7-b7ed-4a09-9300-a99efa85ce97\\\\n2026-01-31T16:29:35+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_7c3980f7-b7ed-4a09-9300-a99efa85ce97 to /host/opt/cni/bin/\\\\n2026-01-31T16:29:35Z [verbose] multus-daemon started\\\\n2026-01-31T16:29:35Z [verbose] Readiness 
Indicator file check\\\\n2026-01-31T16:30:20Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m86wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g5kbw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:31Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.519402 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kvc58" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e39e875a-bea7-4e27-af9a-f769a493efe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c9c0d071a489b7537159a33cfb524a36c03fb2b7dbb0bff1c4d966c26aa4a503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6shzp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a326f3c1d78ff42444724a0595281be656bfe3f92ff5716a089e7a6c6828d34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6shzp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kvc58\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:31Z is after 2025-08-24T17:21:41Z" Jan 31 
16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.543083 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.543136 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.543155 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.543180 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.543197 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:31Z","lastTransitionTime":"2026-01-31T16:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.646722 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.646773 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.646790 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.646816 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.646836 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:31Z","lastTransitionTime":"2026-01-31T16:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.707484 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:30:31 crc kubenswrapper[4769]: E0131 16:30:31.707886 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.718945 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-07 14:15:24.156681261 +0000 UTC Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.724648 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.749473 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.749575 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.749593 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.749618 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.749635 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:31Z","lastTransitionTime":"2026-01-31T16:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.853019 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.853068 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.853085 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.853106 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.853123 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:31Z","lastTransitionTime":"2026-01-31T16:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.955632 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.955707 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.955724 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.955749 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:31 crc kubenswrapper[4769]: I0131 16:30:31.955769 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:31Z","lastTransitionTime":"2026-01-31T16:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.059452 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.059572 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.059602 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.059634 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.059655 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:32Z","lastTransitionTime":"2026-01-31T16:30:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.162737 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.162946 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.162974 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.163009 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.163031 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:32Z","lastTransitionTime":"2026-01-31T16:30:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.266256 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.266321 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.266339 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.266363 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.266385 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:32Z","lastTransitionTime":"2026-01-31T16:30:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.374718 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.374784 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.374801 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.374824 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.374841 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:32Z","lastTransitionTime":"2026-01-31T16:30:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.477859 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.477907 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.477923 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.477947 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.477964 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:32Z","lastTransitionTime":"2026-01-31T16:30:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.580675 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.580769 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.580793 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.580827 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.580849 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:32Z","lastTransitionTime":"2026-01-31T16:30:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.684792 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.684863 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.684887 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.684911 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.684929 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:32Z","lastTransitionTime":"2026-01-31T16:30:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.708071 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:30:32 crc kubenswrapper[4769]: E0131 16:30:32.708240 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.708407 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:30:32 crc kubenswrapper[4769]: E0131 16:30:32.708578 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.708744 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:30:32 crc kubenswrapper[4769]: E0131 16:30:32.708911 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.719762 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-13 07:07:30.636322277 +0000 UTC Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.730298 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:32Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.755803 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f9e971a-93ce-4a49-a970-a2789486d12c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f591a190eaf4b09cdb5961545cba8b3e469d2dd5b6b729627af238d6d8ebb410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rftqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:32Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.774683 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d352f75-43f7-4b8c-867e-cfb17bbbe011\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e57f0adc59ebcc50644648ee64395f6834328fec384b3d8c83b9758314ea18d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48035545fd929a672be1a83a941b13f4b352bdb858af6a412efec46dc7ac217\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-4bqbm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:32Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.788398 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.788458 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.788486 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.788548 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.788572 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:32Z","lastTransitionTime":"2026-01-31T16:30:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.806591 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86f2019b-d6ca-4e73-9dac-52fe746489cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2f542bc61e702fd04e7f702af083e41309d9ba14e7edaf90b0e34a9b1ab7b53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2f542bc61e702fd04e7f702af083e41309d9ba14e7edaf90b0e34a9b1ab7b53\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-31T16:30:29Z\\\",\\\"message\\\":\\\"7.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:29Z is after 2025-08-24T17:21:41Z]\\\\nI0131 16:30:29.569813 6816 services_controller.go:452] Built service openshift-console/console per-node LB for network=default: []services.LB{}\\\\nI0131 16:30:29.570939 6816 transact.go:42] Configuring OVN: [{Op:update Table:Logical_Switch_Port Row:map[addresses:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]} options:{GoMap:map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == 
{c02bd945-d57b-49ff-9cd3-202ed3574b\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:30:28Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-2r9tc_openshift-ovn-kubernetes(86f2019b-d6ca-4e73-9dac-52fe746489cb)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\
"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2r9tc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:32Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.826037 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bl9cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"428b0729-22d7-4feb-a392-1ec77e5acbc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzcbr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzcbr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:47Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bl9cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:32Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.843891 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lw4fx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"97c97bcd-bd44-4fdf-a90a-2d6be88c23e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f603aa4ef9fa0a10f90946e243cd0d25dcd6dba58c86a4378085addf26d95233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6q9l7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lw4fx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:32Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.865605 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"21fa2db2-f448-487d-9ddb-ba4da28e8ffa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0131 16:29:26.255718 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0131 16:29:26.257930 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3522701725/tls.crt::/tmp/serving-cert-3522701725/tls.key\\\\\\\"\\\\nI0131 16:29:32.222719 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0131 16:29:32.227365 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0131 16:29:32.227391 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0131 16:29:32.227414 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0131 16:29:32.227419 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0131 16:29:32.234321 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0131 16:29:32.234340 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234349 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0131 16:29:32.234352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0131 16:29:32.234355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0131 16:29:32.234357 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0131 16:29:32.234523 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0131 16:29:32.237094 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:32Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.882859 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"259af7b3-780a-4464-9596-a063fecea409\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc4c0f2131eeb3ea03f5572e41e6c9f86ef15f5d6b70880be88ae6219eadecab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7027bbe350b6afd5a1563f64215b882ff809f1474dcce7a15a843756b7595233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a05b68fb33a04a94268458bbaa7f891e2d493dbe5a27d7e12ced0d561a70e938\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7cff45082b65da86bcca88d363692fdcaffbae44f33b5ae53a09b5aea7eff212\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7cff45082b65da86bcca88d363692fdcaffbae44f33b5ae53a09b5aea7eff212\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:32Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.890973 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.891048 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.891063 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.891108 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.891125 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:32Z","lastTransitionTime":"2026-01-31T16:30:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.901891 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79dca88fae791047a9091e1fcad57d33d0061bbb52395ed325e2658e5fd6ffc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:32Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.919546 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:32Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.936606 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef20199c8b7874b9d34bc38aed75d3d0dc669db3dfaff3218a1a4e249330daca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:32Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.953876 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:32Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.975754 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e5e34357e35a0bda20351c442465d7866c927dfad1a6c8acf1dc32b52a326ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb37e108557965f63da49a443c6667212f8ab8eb0a7099a0a106c45e00e78e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:32Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.994411 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-slrbh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"48d46c05-78b8-4355-9027-77efbbfbe87c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c68712921d6f140f1028a42cadcda30d38e3a9772ad3ca53e6d7b3b039433b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hqcpx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-slrbh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:32Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.994776 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.994867 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.994887 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.994911 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:32 crc kubenswrapper[4769]: I0131 16:30:32.994928 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:32Z","lastTransitionTime":"2026-01-31T16:30:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.013637 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d851433b-1b86-4634-93e2-2c413b1c4f6d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a0acdb96d69965a7e3f18674c45f56b62ffa359a948133793971dd31fd8df1f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca9119b9d21d895d756b0210339f2b39516f7f4bd84d912e2dc63039f66b9ca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca9119b9d21d895d756b0210339f2b39516f7f4bd84d912e2dc63039f66b9ca8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:33Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:33 
crc kubenswrapper[4769]: I0131 16:30:33.031893 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb8dd1c1-8ad0-4df1-9eb7-f7e36509abac\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8f8d35bedc1d437cf45b14a416b9e5ed1610d7ecd30603f9a179406839220a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9c6bf8a15857982b68a045b8ca1d407a63ad3da192375596f26389d9983cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1712a54b482696918a3ddb7294c16ce3676cb56c2928b721be12bcd390085e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"nam
e\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f22b718ef3d2b16061b2c0fe48c8fe612b3e2b63baa2925fce7a1ad9552b090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:33Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.050616 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g5kbw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a7cfe09-9892-494d-a420-5d720afb3df3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb4ceb742b812d9b282ba14b266b4a78550b5dc38d7637c07d1c95256799bc40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e1fb743faaf9717b716d96f7b36096ee11da25ac3eeeb601bdc6d8f20faf3a3f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-31T16:30:20Z\\\",\\\"message\\\":\\\"2026-01-31T16:29:35+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_7c3980f7-b7ed-4a09-9300-a99efa85ce97\\\\n2026-01-31T16:29:35+00:00 [cnibincopy] Successfully moved 
files in /host/opt/cni/bin/upgrade_7c3980f7-b7ed-4a09-9300-a99efa85ce97 to /host/opt/cni/bin/\\\\n2026-01-31T16:29:35Z [verbose] multus-daemon started\\\\n2026-01-31T16:29:35Z [verbose] Readiness Indicator file check\\\\n2026-01-31T16:30:20Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m86wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g5kbw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:33Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.068435 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kvc58" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e39e875a-bea7-4e27-af9a-f769a493efe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c9c0d071a489b7537159a33cfb524a36c03fb2b7dbb0bff1c4d966c26aa4a503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6shzp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a326f3c1d78ff42444724a0595281be656bfe3f92ff5716a089e7a6c6828d34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6shzp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kvc58\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:33Z is after 2025-08-24T17:21:41Z" Jan 31 
16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.098212 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.098270 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.098291 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.098342 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.098362 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:33Z","lastTransitionTime":"2026-01-31T16:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.200830 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.200909 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.200960 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.201006 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.201029 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:33Z","lastTransitionTime":"2026-01-31T16:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.304444 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.304540 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.304559 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.304602 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.304620 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:33Z","lastTransitionTime":"2026-01-31T16:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.407321 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.407388 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.407414 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.407444 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.407467 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:33Z","lastTransitionTime":"2026-01-31T16:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.510703 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.511004 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.511110 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.511221 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.511341 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:33Z","lastTransitionTime":"2026-01-31T16:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.614029 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.614391 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.614639 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.614825 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.615016 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:33Z","lastTransitionTime":"2026-01-31T16:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.708104 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:30:33 crc kubenswrapper[4769]: E0131 16:30:33.708252 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.718067 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.718123 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.718145 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.718174 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.718191 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:33Z","lastTransitionTime":"2026-01-31T16:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.720213 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-18 15:05:22.513680075 +0000 UTC Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.820831 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.821239 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.821306 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.821388 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.821458 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:33Z","lastTransitionTime":"2026-01-31T16:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.924882 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.924947 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.924964 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.924989 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:33 crc kubenswrapper[4769]: I0131 16:30:33.925006 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:33Z","lastTransitionTime":"2026-01-31T16:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.027686 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.027935 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.027977 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.028001 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.028021 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:34Z","lastTransitionTime":"2026-01-31T16:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.130753 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.130812 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.130832 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.130855 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.130871 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:34Z","lastTransitionTime":"2026-01-31T16:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.233967 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.234024 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.234041 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.234069 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.234086 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:34Z","lastTransitionTime":"2026-01-31T16:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.337863 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.337926 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.337945 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.337974 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.337995 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:34Z","lastTransitionTime":"2026-01-31T16:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.441829 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.441892 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.441909 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.441936 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.441968 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:34Z","lastTransitionTime":"2026-01-31T16:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.544445 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.544836 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.544992 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.545136 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.545256 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:34Z","lastTransitionTime":"2026-01-31T16:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.647978 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.648046 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.648068 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.648098 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.648124 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:34Z","lastTransitionTime":"2026-01-31T16:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.708021 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.708087 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:30:34 crc kubenswrapper[4769]: E0131 16:30:34.708156 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.708166 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:30:34 crc kubenswrapper[4769]: E0131 16:30:34.708259 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:30:34 crc kubenswrapper[4769]: E0131 16:30:34.708346 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.720896 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-11 11:08:51.98398256 +0000 UTC Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.750523 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.750558 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.750569 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.750584 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.750593 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:34Z","lastTransitionTime":"2026-01-31T16:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.853293 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.853332 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.853341 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.853361 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.853369 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:34Z","lastTransitionTime":"2026-01-31T16:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.956288 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.956320 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.956329 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.956344 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:34 crc kubenswrapper[4769]: I0131 16:30:34.956352 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:34Z","lastTransitionTime":"2026-01-31T16:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.058839 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.058881 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.058890 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.058905 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.058917 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:35Z","lastTransitionTime":"2026-01-31T16:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.161114 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.161152 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.161161 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.161179 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.161188 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:35Z","lastTransitionTime":"2026-01-31T16:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.263987 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.264034 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.264047 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.264068 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.264082 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:35Z","lastTransitionTime":"2026-01-31T16:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.366921 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.366964 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.366974 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.366988 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.366997 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:35Z","lastTransitionTime":"2026-01-31T16:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.470278 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.470331 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.470349 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.470404 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.470427 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:35Z","lastTransitionTime":"2026-01-31T16:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.573082 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.573136 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.573145 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.573160 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.573169 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:35Z","lastTransitionTime":"2026-01-31T16:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.675623 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.675707 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.675731 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.675763 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.675786 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:35Z","lastTransitionTime":"2026-01-31T16:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.707101 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:30:35 crc kubenswrapper[4769]: E0131 16:30:35.707273 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.721547 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-05 08:21:22.176984422 +0000 UTC Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.778794 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.778834 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.778845 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.778864 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.778877 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:35Z","lastTransitionTime":"2026-01-31T16:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.881083 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.881161 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.881189 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.881222 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.881244 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:35Z","lastTransitionTime":"2026-01-31T16:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.983581 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.983631 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.983647 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.983668 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:35 crc kubenswrapper[4769]: I0131 16:30:35.983682 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:35Z","lastTransitionTime":"2026-01-31T16:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.086050 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.086098 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.086114 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.086139 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.086155 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:36Z","lastTransitionTime":"2026-01-31T16:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.189230 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.189278 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.189291 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.189310 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.189322 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:36Z","lastTransitionTime":"2026-01-31T16:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.291210 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.291280 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.291302 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.291328 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.291348 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:36Z","lastTransitionTime":"2026-01-31T16:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.394050 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.394084 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.394095 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.394109 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.394120 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:36Z","lastTransitionTime":"2026-01-31T16:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.497025 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.497078 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.497097 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.497124 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.497157 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:36Z","lastTransitionTime":"2026-01-31T16:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.524650 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:30:36 crc kubenswrapper[4769]: E0131 16:30:36.524866 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:31:40.524827233 +0000 UTC m=+148.598995952 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.599638 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.599691 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.599707 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.599732 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.599751 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:36Z","lastTransitionTime":"2026-01-31T16:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.625562 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.625654 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:30:36 crc kubenswrapper[4769]: E0131 16:30:36.625758 4769 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 31 16:30:36 crc kubenswrapper[4769]: E0131 16:30:36.625785 4769 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 31 16:30:36 crc kubenswrapper[4769]: E0131 16:30:36.625854 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-31 16:31:40.625831712 +0000 UTC m=+148.700000411 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 31 16:30:36 crc kubenswrapper[4769]: E0131 16:30:36.625881 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-31 16:31:40.625868293 +0000 UTC m=+148.700037002 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.702537 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.702588 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.702604 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.702632 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.702657 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:36Z","lastTransitionTime":"2026-01-31T16:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.707229 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.707274 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.707406 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:30:36 crc kubenswrapper[4769]: E0131 16:30:36.707457 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:30:36 crc kubenswrapper[4769]: E0131 16:30:36.707376 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:30:36 crc kubenswrapper[4769]: E0131 16:30:36.707651 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.723105 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-11 01:33:12.505778214 +0000 UTC Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.726789 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:30:36 crc kubenswrapper[4769]: E0131 16:30:36.727029 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 31 16:30:36 crc kubenswrapper[4769]: E0131 16:30:36.727072 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 31 16:30:36 crc kubenswrapper[4769]: E0131 16:30:36.727092 4769 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 31 16:30:36 crc kubenswrapper[4769]: E0131 16:30:36.727157 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-31 16:31:40.72713456 +0000 UTC m=+148.801303259 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 31 16:30:36 crc kubenswrapper[4769]: E0131 16:30:36.727324 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 31 16:30:36 crc kubenswrapper[4769]: E0131 16:30:36.727364 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 31 16:30:36 crc kubenswrapper[4769]: E0131 16:30:36.727387 4769 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 31 16:30:36 crc kubenswrapper[4769]: E0131 16:30:36.727470 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-31 16:31:40.727447159 +0000 UTC m=+148.801615918 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.727047 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.806339 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.806403 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.806425 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.806456 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.806477 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:36Z","lastTransitionTime":"2026-01-31T16:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin 
returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.909181 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.909247 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.909272 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.909302 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:36 crc kubenswrapper[4769]: I0131 16:30:36.909325 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:36Z","lastTransitionTime":"2026-01-31T16:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.012188 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.012254 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.012276 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.012306 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.012328 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:37Z","lastTransitionTime":"2026-01-31T16:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.115542 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.115613 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.115631 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.115658 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.115676 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:37Z","lastTransitionTime":"2026-01-31T16:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.218068 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.218374 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.218462 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.218677 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.218781 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:37Z","lastTransitionTime":"2026-01-31T16:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.322078 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.322427 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.322446 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.322470 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.322487 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:37Z","lastTransitionTime":"2026-01-31T16:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.425356 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.425411 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.425424 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.425444 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.425457 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:37Z","lastTransitionTime":"2026-01-31T16:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.528426 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.528929 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.529114 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.529322 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.529603 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:37Z","lastTransitionTime":"2026-01-31T16:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.633016 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.633314 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.633448 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.633623 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.633779 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:37Z","lastTransitionTime":"2026-01-31T16:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.707894 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:30:37 crc kubenswrapper[4769]: E0131 16:30:37.708041 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.723222 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-14 07:35:32.168789617 +0000 UTC Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.736876 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.736908 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.736919 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.736936 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.736948 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:37Z","lastTransitionTime":"2026-01-31T16:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.838863 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.838923 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.838942 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.838966 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.838986 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:37Z","lastTransitionTime":"2026-01-31T16:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.942750 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.943158 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.943321 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.943521 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:37 crc kubenswrapper[4769]: I0131 16:30:37.943669 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:37Z","lastTransitionTime":"2026-01-31T16:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.046343 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.046404 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.046421 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.046443 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.046458 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:38Z","lastTransitionTime":"2026-01-31T16:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.149774 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.149860 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.149885 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.149924 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.149949 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:38Z","lastTransitionTime":"2026-01-31T16:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.253366 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.253445 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.253466 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.253520 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.253544 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:38Z","lastTransitionTime":"2026-01-31T16:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.356570 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.356662 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.356688 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.356726 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.356751 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:38Z","lastTransitionTime":"2026-01-31T16:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.459770 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.459833 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.459852 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.459880 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.459953 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:38Z","lastTransitionTime":"2026-01-31T16:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.563408 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.563476 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.563528 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.563564 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.563594 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:38Z","lastTransitionTime":"2026-01-31T16:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.667069 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.667128 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.667145 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.667170 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.667188 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:38Z","lastTransitionTime":"2026-01-31T16:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.707521 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.707572 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.707547 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:30:38 crc kubenswrapper[4769]: E0131 16:30:38.707708 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:30:38 crc kubenswrapper[4769]: E0131 16:30:38.707872 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:30:38 crc kubenswrapper[4769]: E0131 16:30:38.708051 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.724536 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-31 15:33:30.456961894 +0000 UTC Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.734667 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.770182 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.770221 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.770232 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.770250 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.770262 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:38Z","lastTransitionTime":"2026-01-31T16:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.873865 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.873920 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.873939 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.873965 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.873983 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:38Z","lastTransitionTime":"2026-01-31T16:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.977264 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.977330 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.977356 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.977384 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:38 crc kubenswrapper[4769]: I0131 16:30:38.977408 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:38Z","lastTransitionTime":"2026-01-31T16:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:39 crc kubenswrapper[4769]: I0131 16:30:39.080967 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:39 crc kubenswrapper[4769]: I0131 16:30:39.081032 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:39 crc kubenswrapper[4769]: I0131 16:30:39.081050 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:39 crc kubenswrapper[4769]: I0131 16:30:39.081076 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:39 crc kubenswrapper[4769]: I0131 16:30:39.081100 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:39Z","lastTransitionTime":"2026-01-31T16:30:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:39 crc kubenswrapper[4769]: I0131 16:30:39.184784 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:39 crc kubenswrapper[4769]: I0131 16:30:39.184843 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:39 crc kubenswrapper[4769]: I0131 16:30:39.184859 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:39 crc kubenswrapper[4769]: I0131 16:30:39.184882 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:39 crc kubenswrapper[4769]: I0131 16:30:39.184900 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:39Z","lastTransitionTime":"2026-01-31T16:30:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:39 crc kubenswrapper[4769]: I0131 16:30:39.287379 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:39 crc kubenswrapper[4769]: I0131 16:30:39.287434 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:39 crc kubenswrapper[4769]: I0131 16:30:39.287454 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:39 crc kubenswrapper[4769]: I0131 16:30:39.287477 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:39 crc kubenswrapper[4769]: I0131 16:30:39.287530 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:39Z","lastTransitionTime":"2026-01-31T16:30:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:39 crc kubenswrapper[4769]: I0131 16:30:39.390716 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:39 crc kubenswrapper[4769]: I0131 16:30:39.390762 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:39 crc kubenswrapper[4769]: I0131 16:30:39.390780 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:39 crc kubenswrapper[4769]: I0131 16:30:39.390803 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:39 crc kubenswrapper[4769]: I0131 16:30:39.390820 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:39Z","lastTransitionTime":"2026-01-31T16:30:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:39 crc kubenswrapper[4769]: I0131 16:30:39.494544 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:39 crc kubenswrapper[4769]: I0131 16:30:39.494598 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:39 crc kubenswrapper[4769]: I0131 16:30:39.494616 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:39 crc kubenswrapper[4769]: I0131 16:30:39.494642 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:39 crc kubenswrapper[4769]: I0131 16:30:39.494660 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:39Z","lastTransitionTime":"2026-01-31T16:30:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:39 crc kubenswrapper[4769]: I0131 16:30:39.597576 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:39 crc kubenswrapper[4769]: I0131 16:30:39.597645 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:39 crc kubenswrapper[4769]: I0131 16:30:39.597665 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:39 crc kubenswrapper[4769]: I0131 16:30:39.597693 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:39 crc kubenswrapper[4769]: I0131 16:30:39.597713 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:39Z","lastTransitionTime":"2026-01-31T16:30:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:39 crc kubenswrapper[4769]: I0131 16:30:39.700994 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:39 crc kubenswrapper[4769]: I0131 16:30:39.701059 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:39 crc kubenswrapper[4769]: I0131 16:30:39.701081 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:39 crc kubenswrapper[4769]: I0131 16:30:39.701106 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:39 crc kubenswrapper[4769]: I0131 16:30:39.701125 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:39Z","lastTransitionTime":"2026-01-31T16:30:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:39 crc kubenswrapper[4769]: I0131 16:30:39.707354 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:30:39 crc kubenswrapper[4769]: E0131 16:30:39.707518 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:30:39 crc kubenswrapper[4769]: I0131 16:30:39.725002 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-08 02:29:08.208342833 +0000 UTC Jan 31 16:30:39 crc kubenswrapper[4769]: I0131 16:30:39.804628 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:39 crc kubenswrapper[4769]: I0131 16:30:39.804690 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:39 crc kubenswrapper[4769]: I0131 16:30:39.804707 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:39 crc kubenswrapper[4769]: I0131 16:30:39.804736 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:39 crc kubenswrapper[4769]: I0131 16:30:39.804754 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:39Z","lastTransitionTime":"2026-01-31T16:30:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:39 crc kubenswrapper[4769]: I0131 16:30:39.908240 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:39 crc kubenswrapper[4769]: I0131 16:30:39.908299 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:39 crc kubenswrapper[4769]: I0131 16:30:39.908312 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:39 crc kubenswrapper[4769]: I0131 16:30:39.908331 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:39 crc kubenswrapper[4769]: I0131 16:30:39.908346 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:39Z","lastTransitionTime":"2026-01-31T16:30:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.012267 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.012341 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.012362 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.012393 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.012414 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:40Z","lastTransitionTime":"2026-01-31T16:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.115730 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.115799 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.115818 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.115844 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.115864 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:40Z","lastTransitionTime":"2026-01-31T16:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.219139 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.219207 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.219227 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.219259 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.219278 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:40Z","lastTransitionTime":"2026-01-31T16:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.323013 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.323087 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.323115 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.323147 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.323172 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:40Z","lastTransitionTime":"2026-01-31T16:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.353168 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.353224 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.353241 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.353269 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.353285 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:40Z","lastTransitionTime":"2026-01-31T16:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:40 crc kubenswrapper[4769]: E0131 16:30:40.377650 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a4704f7-ede0-4833-ba79-415de5d798cc\\\",\\\"systemUUID\\\":\\\"e3275d1e-5ae6-4e54-b0fa-71e35cbe4ac0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:40Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.384762 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.384826 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.384844 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.384875 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.384894 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:40Z","lastTransitionTime":"2026-01-31T16:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:40 crc kubenswrapper[4769]: E0131 16:30:40.407899 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a4704f7-ede0-4833-ba79-415de5d798cc\\\",\\\"systemUUID\\\":\\\"e3275d1e-5ae6-4e54-b0fa-71e35cbe4ac0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:40Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.413591 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.413656 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.413681 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.413713 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.413736 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:40Z","lastTransitionTime":"2026-01-31T16:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:40 crc kubenswrapper[4769]: E0131 16:30:40.437875 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a4704f7-ede0-4833-ba79-415de5d798cc\\\",\\\"systemUUID\\\":\\\"e3275d1e-5ae6-4e54-b0fa-71e35cbe4ac0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:40Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.452208 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.452271 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.452289 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.452316 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.452334 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:40Z","lastTransitionTime":"2026-01-31T16:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:40 crc kubenswrapper[4769]: E0131 16:30:40.474405 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a4704f7-ede0-4833-ba79-415de5d798cc\\\",\\\"systemUUID\\\":\\\"e3275d1e-5ae6-4e54-b0fa-71e35cbe4ac0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:40Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.480117 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.480162 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.480179 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.480202 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.480220 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:40Z","lastTransitionTime":"2026-01-31T16:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:40 crc kubenswrapper[4769]: E0131 16:30:40.511853 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a4704f7-ede0-4833-ba79-415de5d798cc\\\",\\\"systemUUID\\\":\\\"e3275d1e-5ae6-4e54-b0fa-71e35cbe4ac0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:40Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:40 crc kubenswrapper[4769]: E0131 16:30:40.512102 4769 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.515188 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.515271 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.515295 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.515329 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.515353 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:40Z","lastTransitionTime":"2026-01-31T16:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.618871 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.618941 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.618958 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.618984 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.619003 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:40Z","lastTransitionTime":"2026-01-31T16:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.708210 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.708276 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.708301 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:30:40 crc kubenswrapper[4769]: E0131 16:30:40.708436 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:30:40 crc kubenswrapper[4769]: E0131 16:30:40.708736 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:30:40 crc kubenswrapper[4769]: E0131 16:30:40.708876 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.721406 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.721470 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.721519 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.721547 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.721566 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:40Z","lastTransitionTime":"2026-01-31T16:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.725661 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-04 11:47:19.775640049 +0000 UTC Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.825183 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.825254 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.825272 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.825298 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.825316 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:40Z","lastTransitionTime":"2026-01-31T16:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.928895 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.928968 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.928985 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.929012 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:40 crc kubenswrapper[4769]: I0131 16:30:40.929032 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:40Z","lastTransitionTime":"2026-01-31T16:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.032348 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.032412 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.032431 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.032459 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.032477 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:41Z","lastTransitionTime":"2026-01-31T16:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.136386 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.136443 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.136462 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.136482 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.136519 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:41Z","lastTransitionTime":"2026-01-31T16:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.239254 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.239315 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.239333 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.239360 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.239378 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:41Z","lastTransitionTime":"2026-01-31T16:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.344939 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.345010 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.345037 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.345070 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.345093 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:41Z","lastTransitionTime":"2026-01-31T16:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.448745 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.449073 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.449274 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.449460 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.449665 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:41Z","lastTransitionTime":"2026-01-31T16:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.552482 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.552843 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.552926 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.553002 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.553078 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:41Z","lastTransitionTime":"2026-01-31T16:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.656296 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.656328 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.656339 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.656354 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.656364 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:41Z","lastTransitionTime":"2026-01-31T16:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.708299 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:30:41 crc kubenswrapper[4769]: E0131 16:30:41.708830 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.726537 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-03 17:40:48.274434374 +0000 UTC Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.758749 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.758802 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.758822 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.758847 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.758866 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:41Z","lastTransitionTime":"2026-01-31T16:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.862185 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.862220 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.862230 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.862246 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.862258 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:41Z","lastTransitionTime":"2026-01-31T16:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.964891 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.964941 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.964950 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.964963 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:41 crc kubenswrapper[4769]: I0131 16:30:41.964972 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:41Z","lastTransitionTime":"2026-01-31T16:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.067424 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.067448 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.067455 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.067468 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.067478 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:42Z","lastTransitionTime":"2026-01-31T16:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.171198 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.171275 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.171286 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.171303 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.171316 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:42Z","lastTransitionTime":"2026-01-31T16:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.273900 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.274328 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.274482 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.275431 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.275637 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:42Z","lastTransitionTime":"2026-01-31T16:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.378320 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.378712 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.378886 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.379105 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.379265 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:42Z","lastTransitionTime":"2026-01-31T16:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.482804 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.482895 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.482914 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.482938 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.482958 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:42Z","lastTransitionTime":"2026-01-31T16:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.587149 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.587224 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.587246 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.587290 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.587309 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:42Z","lastTransitionTime":"2026-01-31T16:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.690819 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.690880 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.690900 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.690928 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.690950 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:42Z","lastTransitionTime":"2026-01-31T16:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.707859 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:30:42 crc kubenswrapper[4769]: E0131 16:30:42.708040 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.708264 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.708339 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:30:42 crc kubenswrapper[4769]: E0131 16:30:42.710269 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:30:42 crc kubenswrapper[4769]: E0131 16:30:42.710533 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.726796 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-24 14:29:32.952378577 +0000 UTC Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.733489 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21fa2db2-f448-487d-9ddb-ba4da28e8ffa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\
\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0131 16:29:26.255718 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0131 16:29:26.257930 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3522701725/tls.crt::/tmp/serving-cert-3522701725/tls.key\\\\\\\"\\\\nI0131 16:29:32.222719 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0131 16:29:32.227365 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0131 16:29:32.227391 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0131 16:29:32.227414 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0131 16:29:32.227419 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0131 16:29:32.234321 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0131 16:29:32.234340 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234349 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0131 16:29:32.234352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0131 16:29:32.234355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0131 16:29:32.234357 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0131 16:29:32.234523 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0131 16:29:32.237094 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:42Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.753201 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"259af7b3-780a-4464-9596-a063fecea409\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc4c0f2131eeb3ea03f5572e41e6c9f86ef15f5d6b70880be88ae6219eadecab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7027bbe350b6afd5a1563f64215b882ff809f1474dcce7a15a843756b7595233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a05b68fb33a04a94268458bbaa7f891e2d493dbe5a27d7e12ced0d561a70e938\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7cff45082b65da86bcca88d363692fdcaffbae44f33b5ae53a09b5aea7eff212\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7cff45082b65da86bcca88d363692fdcaffbae44f33b5ae53a09b5aea7eff212\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:42Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.779559 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79dca88fae791047a9091e1fcad57d33d0061bbb52395ed325e2658e5fd6ffc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:42Z is after 
2025-08-24T17:21:41Z" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.795638 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.796587 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.796805 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.797134 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.797339 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:42Z","lastTransitionTime":"2026-01-31T16:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.801056 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:42Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.820439 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef20199c8b7874b9d34bc38aed75d3d0dc669db3dfaff3218a1a4e249330daca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:42Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.842017 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lw4fx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"97c97bcd-bd44-4fdf-a90a-2d6be88c23e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f603aa4ef9fa0a10f90946e243cd0d25dcd6dba58c86a4378085addf26d95233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6q9l7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lw4fx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:42Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.882015 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6b28ca5-e4cc-4733-8fb3-f5bc2c30ee86\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0717f838e9dd04fe04218d3858edda9cd76359c43d888bef60f5c6d533d35ec1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1fd57645c8ddb97e6fb290f81195428e2bfd6b3af321e260cd4007cadc0fc496\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d282bca47e56b022765460bd4e2c34df8d15e19b52199a1cded767fada46ae2b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a52d14237bd46861c4f0ee3500a8cc3baf688f2
73b03262ada1439aaea53eb61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52ce0326d494aa79957bf95a8d09e34edec45f9d6145f648b035aed46693afa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://60c6336ddc7116c2d4e7c6d7c9e95f19b1b4c3c752a291ab6cd798fcdb76d2ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://60c6336ddc7116c2d4e7c6d7c9e95f19b1b4c3c752a291ab6cd798fcdb76d2ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://502a1f0176c5e13455fc3ce0f6ca36385880f97689263220bc94494f4fb5e547\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://502a1f0176c5e13455fc3ce0f6ca36385880f97689263220bc94494f4fb5e547\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e529241eda20b31671e3674411aacf20711d7b27804d7978b48bcacaadf8a25d\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e529241eda20b31671e3674411aacf20711d7b27804d7978b48bcacaadf8a25d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:42Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.900425 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.900487 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.900537 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.900575 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.900597 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:42Z","lastTransitionTime":"2026-01-31T16:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.904068 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:42Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.926908 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e5e34357e35a0bda20351c442465d7866c927dfad1a6c8acf1dc32b52a326ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb37e108557965f63da49a443c6667212f8ab8eb0a7099a0a106c45e00e78e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:42Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.946200 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-slrbh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"48d46c05-78b8-4355-9027-77efbbfbe87c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c68712921d6f140f1028a42cadcda30d38e3a9772ad3ca53e6d7b3b039433b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hqcpx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-slrbh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:42Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.964588 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d851433b-1b86-4634-93e2-2c413b1c4f6d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a0acdb96d69965a7e3f18674c45f56b62ffa359a948133793971dd31fd8df1f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca9119b9d21d895d756b0210339f2b39516f7f4bd84d912e2dc63039f66b9ca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca9119b9d21d895d756b0210339f2b39516f7f4bd84d912e2dc63039f66b9ca8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:42Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:42 crc kubenswrapper[4769]: I0131 16:30:42.988738 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb8dd1c1-8ad0-4df1-9eb7-f7e36509abac\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8f8d35bedc1d437cf45b14a416b9e5ed1610d7ecd30603f9a179406839220a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9c6bf8a15857982b68a045b8ca1d407a63ad3da192375596f26389d9983cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1712a54b482696918a3ddb7294c16ce3676cb56c2928b721be12bcd390085e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f22b718ef3d2b16061b2c0fe48c8fe612b3e2b63baa2925fce7a1ad9552b090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:42Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.004131 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.004184 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.004205 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.004228 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.004244 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:43Z","lastTransitionTime":"2026-01-31T16:30:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.014841 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g5kbw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a7cfe09-9892-494d-a420-5d720afb3df3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb4ceb742b812d9b282ba14b266b4a78550b5dc38d7637c07d1c95256799bc40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e1fb743faaf9717b716d96f7b36096ee11da25ac3eeeb601bdc6d8f20faf3a3f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-31T16:30:20Z\\\",\\\"message\\\":\\\"2026-01-31T16:29:35+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_7c3980f7-b7ed-4a09-9300-a99efa85ce97\\\\n2026-01-31T16:29:35+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_7c3980f7-b7ed-4a09-9300-a99efa85ce97 to /host/opt/cni/bin/\\\\n2026-01-31T16:29:35Z [verbose] multus-daemon started\\\\n2026-01-31T16:29:35Z [verbose] Readiness Indicator file check\\\\n2026-01-31T16:30:20Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m86wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g5kbw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:43Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.036829 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kvc58" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e39e875a-bea7-4e27-af9a-f769a493efe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c9c0d071a489b7537159a33cfb524a36c03fb2b7dbb0bff1c4d966c26aa4a503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6shzp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a326f3c1d78ff42444724a0595281be656bfe3f92ff5716a089e7a6c6828d34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6shzp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kvc58\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:43Z is after 2025-08-24T17:21:41Z" Jan 31 
16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.062128 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:43Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.088344 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f9e971a-93ce-4a49-a970-a2789486d12c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f591a190eaf4b09cdb5961545cba8b3e469d2dd5b6b729627af238d6d8ebb410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rftqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:43Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.107898 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.108448 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:43 crc 
kubenswrapper[4769]: I0131 16:30:43.108678 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.108883 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.109040 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:43Z","lastTransitionTime":"2026-01-31T16:30:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.110740 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d352f75-43f7-4b8c-867e-cfb17bbbe011\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e57f0adc59ebcc50644648ee64395f6834328fec384b3d8c83b9758314ea18d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48035545fd929a672be1a83a941b13f4b352bdb858af6a412efec46dc7ac217\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\
\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-4bqbm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:43Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.138394 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86f2019b-d6ca-4e73-9dac-52fe746489cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2f542bc61e702fd04e7f702af083e41309d9ba1
4e7edaf90b0e34a9b1ab7b53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2f542bc61e702fd04e7f702af083e41309d9ba14e7edaf90b0e34a9b1ab7b53\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-31T16:30:29Z\\\",\\\"message\\\":\\\"7.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:29Z is after 2025-08-24T17:21:41Z]\\\\nI0131 16:30:29.569813 6816 services_controller.go:452] Built service openshift-console/console per-node LB for network=default: []services.LB{}\\\\nI0131 16:30:29.570939 6816 transact.go:42] Configuring OVN: [{Op:update Table:Logical_Switch_Port Row:map[addresses:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]} options:{GoMap:map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:30:28Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-2r9tc_openshift-ovn-kubernetes(86f2019b-d6ca-4e73-9dac-52fe746489cb)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2r9tc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:43Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.156829 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bl9cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"428b0729-22d7-4feb-a392-1ec77e5acbc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzcbr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzcbr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:47Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bl9cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:43Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.212658 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.212759 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.212778 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.212830 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.212848 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:43Z","lastTransitionTime":"2026-01-31T16:30:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.316125 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.316175 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.316191 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.316212 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.316226 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:43Z","lastTransitionTime":"2026-01-31T16:30:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.418554 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.418794 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.418861 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.418941 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.419030 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:43Z","lastTransitionTime":"2026-01-31T16:30:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.521306 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.521347 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.521359 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.521380 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.521393 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:43Z","lastTransitionTime":"2026-01-31T16:30:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.623979 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.624038 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.624057 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.624081 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.624097 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:43Z","lastTransitionTime":"2026-01-31T16:30:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.708338 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:30:43 crc kubenswrapper[4769]: E0131 16:30:43.708619 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.709814 4769 scope.go:117] "RemoveContainer" containerID="a2f542bc61e702fd04e7f702af083e41309d9ba14e7edaf90b0e34a9b1ab7b53" Jan 31 16:30:43 crc kubenswrapper[4769]: E0131 16:30:43.710120 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-2r9tc_openshift-ovn-kubernetes(86f2019b-d6ca-4e73-9dac-52fe746489cb)\"" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.726958 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-25 09:04:14.312067918 +0000 UTC Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.727087 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.727155 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.727178 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.727211 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.727232 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:43Z","lastTransitionTime":"2026-01-31T16:30:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.830820 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.830892 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.830910 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.830936 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.830957 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:43Z","lastTransitionTime":"2026-01-31T16:30:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.934172 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.934240 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.934260 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.934284 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:43 crc kubenswrapper[4769]: I0131 16:30:43.934302 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:43Z","lastTransitionTime":"2026-01-31T16:30:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.037692 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.037746 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.037764 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.037786 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.037803 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:44Z","lastTransitionTime":"2026-01-31T16:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.141417 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.141554 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.141604 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.141631 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.141648 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:44Z","lastTransitionTime":"2026-01-31T16:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.245155 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.245263 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.245284 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.245314 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.245336 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:44Z","lastTransitionTime":"2026-01-31T16:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.348683 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.348758 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.348778 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.348802 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.348820 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:44Z","lastTransitionTime":"2026-01-31T16:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.451338 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.451414 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.451435 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.451468 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.451489 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:44Z","lastTransitionTime":"2026-01-31T16:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.554757 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.554827 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.554846 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.554878 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.554899 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:44Z","lastTransitionTime":"2026-01-31T16:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.658008 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.658079 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.658098 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.658128 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.658145 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:44Z","lastTransitionTime":"2026-01-31T16:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.708004 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.708107 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:30:44 crc kubenswrapper[4769]: E0131 16:30:44.708220 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:30:44 crc kubenswrapper[4769]: E0131 16:30:44.708306 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.708475 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:30:44 crc kubenswrapper[4769]: E0131 16:30:44.708862 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.728121 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-09 22:10:11.836210726 +0000 UTC Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.761875 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.761936 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.761961 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.761988 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.762009 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:44Z","lastTransitionTime":"2026-01-31T16:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.864430 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.864488 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.864533 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.864557 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.864574 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:44Z","lastTransitionTime":"2026-01-31T16:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.967391 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.967443 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.967459 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.967480 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:44 crc kubenswrapper[4769]: I0131 16:30:44.967533 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:44Z","lastTransitionTime":"2026-01-31T16:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:45 crc kubenswrapper[4769]: I0131 16:30:45.071178 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:45 crc kubenswrapper[4769]: I0131 16:30:45.071258 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:45 crc kubenswrapper[4769]: I0131 16:30:45.071280 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:45 crc kubenswrapper[4769]: I0131 16:30:45.071315 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:45 crc kubenswrapper[4769]: I0131 16:30:45.071337 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:45Z","lastTransitionTime":"2026-01-31T16:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:45 crc kubenswrapper[4769]: I0131 16:30:45.174080 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:45 crc kubenswrapper[4769]: I0131 16:30:45.174141 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:45 crc kubenswrapper[4769]: I0131 16:30:45.174160 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:45 crc kubenswrapper[4769]: I0131 16:30:45.174191 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:45 crc kubenswrapper[4769]: I0131 16:30:45.174214 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:45Z","lastTransitionTime":"2026-01-31T16:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:45 crc kubenswrapper[4769]: I0131 16:30:45.277978 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:45 crc kubenswrapper[4769]: I0131 16:30:45.278055 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:45 crc kubenswrapper[4769]: I0131 16:30:45.278077 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:45 crc kubenswrapper[4769]: I0131 16:30:45.278141 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:45 crc kubenswrapper[4769]: I0131 16:30:45.278161 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:45Z","lastTransitionTime":"2026-01-31T16:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:45 crc kubenswrapper[4769]: I0131 16:30:45.381467 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:45 crc kubenswrapper[4769]: I0131 16:30:45.381579 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:45 crc kubenswrapper[4769]: I0131 16:30:45.381599 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:45 crc kubenswrapper[4769]: I0131 16:30:45.381629 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:45 crc kubenswrapper[4769]: I0131 16:30:45.381648 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:45Z","lastTransitionTime":"2026-01-31T16:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:45 crc kubenswrapper[4769]: I0131 16:30:45.486031 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:45 crc kubenswrapper[4769]: I0131 16:30:45.486129 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:45 crc kubenswrapper[4769]: I0131 16:30:45.486149 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:45 crc kubenswrapper[4769]: I0131 16:30:45.486187 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:45 crc kubenswrapper[4769]: I0131 16:30:45.486209 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:45Z","lastTransitionTime":"2026-01-31T16:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:45 crc kubenswrapper[4769]: I0131 16:30:45.588943 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:45 crc kubenswrapper[4769]: I0131 16:30:45.589026 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:45 crc kubenswrapper[4769]: I0131 16:30:45.589052 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:45 crc kubenswrapper[4769]: I0131 16:30:45.589087 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:45 crc kubenswrapper[4769]: I0131 16:30:45.589110 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:45Z","lastTransitionTime":"2026-01-31T16:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:45 crc kubenswrapper[4769]: I0131 16:30:45.692335 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:45 crc kubenswrapper[4769]: I0131 16:30:45.692727 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:45 crc kubenswrapper[4769]: I0131 16:30:45.692929 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:45 crc kubenswrapper[4769]: I0131 16:30:45.693150 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:45 crc kubenswrapper[4769]: I0131 16:30:45.693282 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:45Z","lastTransitionTime":"2026-01-31T16:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:45 crc kubenswrapper[4769]: I0131 16:30:45.707799 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:30:45 crc kubenswrapper[4769]: E0131 16:30:45.708157 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:30:45 crc kubenswrapper[4769]: I0131 16:30:45.728788 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-27 13:27:49.359492668 +0000 UTC Jan 31 16:30:45 crc kubenswrapper[4769]: I0131 16:30:45.796449 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:45 crc kubenswrapper[4769]: I0131 16:30:45.796883 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:45 crc kubenswrapper[4769]: I0131 16:30:45.797025 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:45 crc kubenswrapper[4769]: I0131 16:30:45.797172 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:45 crc kubenswrapper[4769]: I0131 16:30:45.797322 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:45Z","lastTransitionTime":"2026-01-31T16:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:45 crc kubenswrapper[4769]: I0131 16:30:45.900697 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:45 crc kubenswrapper[4769]: I0131 16:30:45.900759 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:45 crc kubenswrapper[4769]: I0131 16:30:45.900775 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:45 crc kubenswrapper[4769]: I0131 16:30:45.900801 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:45 crc kubenswrapper[4769]: I0131 16:30:45.900818 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:45Z","lastTransitionTime":"2026-01-31T16:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.004916 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.005211 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.005287 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.005386 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.005475 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:46Z","lastTransitionTime":"2026-01-31T16:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.109735 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.109802 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.109822 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.109853 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.109874 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:46Z","lastTransitionTime":"2026-01-31T16:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.212732 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.212797 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.212818 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.212846 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.212866 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:46Z","lastTransitionTime":"2026-01-31T16:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.316257 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.316335 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.316358 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.316397 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.316421 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:46Z","lastTransitionTime":"2026-01-31T16:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.420398 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.420477 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.420529 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.420564 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.420589 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:46Z","lastTransitionTime":"2026-01-31T16:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.523777 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.523838 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.523850 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.523872 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.523886 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:46Z","lastTransitionTime":"2026-01-31T16:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.626462 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.626620 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.626647 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.626685 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.626711 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:46Z","lastTransitionTime":"2026-01-31T16:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.708020 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.708101 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.708111 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:30:46 crc kubenswrapper[4769]: E0131 16:30:46.709026 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:30:46 crc kubenswrapper[4769]: E0131 16:30:46.709150 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:30:46 crc kubenswrapper[4769]: E0131 16:30:46.709339 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.729805 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-03 19:54:24.616083393 +0000 UTC Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.730369 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.730414 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.730432 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.730450 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.730466 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:46Z","lastTransitionTime":"2026-01-31T16:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.834361 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.834436 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.834457 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.834485 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.834546 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:46Z","lastTransitionTime":"2026-01-31T16:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.939263 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.939400 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.939422 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.939453 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:46 crc kubenswrapper[4769]: I0131 16:30:46.939472 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:46Z","lastTransitionTime":"2026-01-31T16:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.048682 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.049677 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.049711 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.049742 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.049815 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:47Z","lastTransitionTime":"2026-01-31T16:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.153626 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.154539 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.154879 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.155744 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.155846 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:47Z","lastTransitionTime":"2026-01-31T16:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.259540 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.259613 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.259635 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.259662 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.259681 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:47Z","lastTransitionTime":"2026-01-31T16:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.362384 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.362426 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.362438 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.362453 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.362464 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:47Z","lastTransitionTime":"2026-01-31T16:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.464986 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.465019 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.465029 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.465043 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.465053 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:47Z","lastTransitionTime":"2026-01-31T16:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.568265 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.568343 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.568370 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.568422 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.568452 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:47Z","lastTransitionTime":"2026-01-31T16:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.671981 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.672291 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.672384 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.672485 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.672619 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:47Z","lastTransitionTime":"2026-01-31T16:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.707761 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:30:47 crc kubenswrapper[4769]: E0131 16:30:47.708374 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.730014 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-08 16:02:45.101033857 +0000 UTC Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.776285 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.776340 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.776360 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.776389 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.776409 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:47Z","lastTransitionTime":"2026-01-31T16:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.879738 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.879816 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.879839 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.879874 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.879899 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:47Z","lastTransitionTime":"2026-01-31T16:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.983707 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.983784 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.983812 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.983848 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:47 crc kubenswrapper[4769]: I0131 16:30:47.983875 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:47Z","lastTransitionTime":"2026-01-31T16:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.087072 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.087226 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.087252 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.087281 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.087302 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:48Z","lastTransitionTime":"2026-01-31T16:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.190309 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.190464 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.190491 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.190584 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.190609 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:48Z","lastTransitionTime":"2026-01-31T16:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.294189 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.294264 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.294283 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.294312 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.294331 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:48Z","lastTransitionTime":"2026-01-31T16:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.398237 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.398318 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.398335 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.398370 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.398391 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:48Z","lastTransitionTime":"2026-01-31T16:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.501702 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.501977 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.501995 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.502026 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.502051 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:48Z","lastTransitionTime":"2026-01-31T16:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.605705 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.605781 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.605804 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.605835 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.605853 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:48Z","lastTransitionTime":"2026-01-31T16:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.707338 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.707591 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:30:48 crc kubenswrapper[4769]: E0131 16:30:48.707620 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:30:48 crc kubenswrapper[4769]: E0131 16:30:48.707887 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.708320 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:30:48 crc kubenswrapper[4769]: E0131 16:30:48.708580 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.709936 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.710005 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.710032 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.710066 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.710095 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:48Z","lastTransitionTime":"2026-01-31T16:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.749889 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-21 21:51:09.740754516 +0000 UTC Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.813018 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.813091 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.813110 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.813135 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.813153 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:48Z","lastTransitionTime":"2026-01-31T16:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.916468 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.916578 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.916601 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.916629 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:48 crc kubenswrapper[4769]: I0131 16:30:48.916652 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:48Z","lastTransitionTime":"2026-01-31T16:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.019719 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.019801 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.019824 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.019854 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.019873 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:49Z","lastTransitionTime":"2026-01-31T16:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.123296 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.123342 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.123352 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.123368 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.123379 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:49Z","lastTransitionTime":"2026-01-31T16:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.227244 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.227330 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.227348 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.227375 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.227395 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:49Z","lastTransitionTime":"2026-01-31T16:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.330966 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.331040 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.331060 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.331091 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.331117 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:49Z","lastTransitionTime":"2026-01-31T16:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.434732 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.434789 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.434799 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.434817 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.434828 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:49Z","lastTransitionTime":"2026-01-31T16:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.537855 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.537944 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.537968 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.538000 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.538024 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:49Z","lastTransitionTime":"2026-01-31T16:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.641723 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.641792 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.641815 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.641848 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.641920 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:49Z","lastTransitionTime":"2026-01-31T16:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.707376 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:30:49 crc kubenswrapper[4769]: E0131 16:30:49.707630 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.745428 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.745602 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.745626 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.745651 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.745672 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:49Z","lastTransitionTime":"2026-01-31T16:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.750682 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-02 14:48:17.022240351 +0000 UTC Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.864820 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.864871 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.864889 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.864913 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.864931 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:49Z","lastTransitionTime":"2026-01-31T16:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.967796 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.967855 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.967873 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.967902 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:49 crc kubenswrapper[4769]: I0131 16:30:49.967920 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:49Z","lastTransitionTime":"2026-01-31T16:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.070427 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.070469 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.070479 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.070496 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.070526 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:50Z","lastTransitionTime":"2026-01-31T16:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.173533 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.174027 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.174189 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.174364 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.174563 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:50Z","lastTransitionTime":"2026-01-31T16:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.277475 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.277990 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.278198 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.278362 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.278556 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:50Z","lastTransitionTime":"2026-01-31T16:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.382357 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.382401 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.382427 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.382447 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.382459 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:50Z","lastTransitionTime":"2026-01-31T16:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.485919 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.485973 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.485984 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.486002 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.486013 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:50Z","lastTransitionTime":"2026-01-31T16:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.588569 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.588634 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.588653 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.588678 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.588697 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:50Z","lastTransitionTime":"2026-01-31T16:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.685841 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.685938 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.685957 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.685991 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.686020 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:50Z","lastTransitionTime":"2026-01-31T16:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:50 crc kubenswrapper[4769]: E0131 16:30:50.704962 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a4704f7-ede0-4833-ba79-415de5d798cc\\\",\\\"systemUUID\\\":\\\"e3275d1e-5ae6-4e54-b0fa-71e35cbe4ac0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:50Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.708064 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.708304 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.708060 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:30:50 crc kubenswrapper[4769]: E0131 16:30:50.708460 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:30:50 crc kubenswrapper[4769]: E0131 16:30:50.708633 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:30:50 crc kubenswrapper[4769]: E0131 16:30:50.708784 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.712042 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.712075 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.712085 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.712101 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.712110 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:50Z","lastTransitionTime":"2026-01-31T16:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:50 crc kubenswrapper[4769]: E0131 16:30:50.730004 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a4704f7-ede0-4833-ba79-415de5d798cc\\\",\\\"systemUUID\\\":\\\"e3275d1e-5ae6-4e54-b0fa-71e35cbe4ac0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:50Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.736064 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.736116 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.736133 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.736158 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.736174 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:50Z","lastTransitionTime":"2026-01-31T16:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.751633 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-27 23:35:36.21647309 +0000 UTC Jan 31 16:30:50 crc kubenswrapper[4769]: E0131 16:30:50.755229 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a4704f7-ede0-4833-ba79-415de5d798cc\\\",\\\"systemUUID\\\":\\\"e3275d1e-5ae6-4e54-b0fa-71e35cbe4ac0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:50Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.758845 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.758904 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.758924 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.758948 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.758968 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:50Z","lastTransitionTime":"2026-01-31T16:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:50 crc kubenswrapper[4769]: E0131 16:30:50.778530 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a4704f7-ede0-4833-ba79-415de5d798cc\\\",\\\"systemUUID\\\":\\\"e3275d1e-5ae6-4e54-b0fa-71e35cbe4ac0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:50Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.785229 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.785307 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.785326 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.785357 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.785377 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:50Z","lastTransitionTime":"2026-01-31T16:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:50 crc kubenswrapper[4769]: E0131 16:30:50.806486 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-31T16:30:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a4704f7-ede0-4833-ba79-415de5d798cc\\\",\\\"systemUUID\\\":\\\"e3275d1e-5ae6-4e54-b0fa-71e35cbe4ac0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:50Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:50 crc kubenswrapper[4769]: E0131 16:30:50.806661 4769 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.808689 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.808748 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.808769 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.808800 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.808820 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:50Z","lastTransitionTime":"2026-01-31T16:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.917090 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.917164 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.917181 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.917211 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:50 crc kubenswrapper[4769]: I0131 16:30:50.917272 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:50Z","lastTransitionTime":"2026-01-31T16:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.020434 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.020472 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.020483 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.020521 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.020535 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:51Z","lastTransitionTime":"2026-01-31T16:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.123644 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.123683 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.123691 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.123705 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.123714 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:51Z","lastTransitionTime":"2026-01-31T16:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.226669 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.226715 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.226724 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.226740 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.226752 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:51Z","lastTransitionTime":"2026-01-31T16:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.329401 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.329469 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.329482 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.329525 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.329538 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:51Z","lastTransitionTime":"2026-01-31T16:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.432263 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.432339 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.432358 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.432393 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.432413 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:51Z","lastTransitionTime":"2026-01-31T16:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.513338 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/428b0729-22d7-4feb-a392-1ec77e5acbc0-metrics-certs\") pod \"network-metrics-daemon-bl9cd\" (UID: \"428b0729-22d7-4feb-a392-1ec77e5acbc0\") " pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:30:51 crc kubenswrapper[4769]: E0131 16:30:51.513616 4769 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 31 16:30:51 crc kubenswrapper[4769]: E0131 16:30:51.513743 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/428b0729-22d7-4feb-a392-1ec77e5acbc0-metrics-certs podName:428b0729-22d7-4feb-a392-1ec77e5acbc0 nodeName:}" failed. No retries permitted until 2026-01-31 16:31:55.513713163 +0000 UTC m=+163.587881872 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/428b0729-22d7-4feb-a392-1ec77e5acbc0-metrics-certs") pod "network-metrics-daemon-bl9cd" (UID: "428b0729-22d7-4feb-a392-1ec77e5acbc0") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.536061 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.536129 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.536148 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.536176 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.536195 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:51Z","lastTransitionTime":"2026-01-31T16:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.639215 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.639279 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.639295 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.639322 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.639340 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:51Z","lastTransitionTime":"2026-01-31T16:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.707601 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:30:51 crc kubenswrapper[4769]: E0131 16:30:51.707852 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.742665 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.742709 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.742720 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.742735 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.742746 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:51Z","lastTransitionTime":"2026-01-31T16:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.752036 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-04 16:37:41.767116314 +0000 UTC Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.845866 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.845902 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.845912 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.845926 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.845937 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:51Z","lastTransitionTime":"2026-01-31T16:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.949174 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.949268 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.949287 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.949310 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:51 crc kubenswrapper[4769]: I0131 16:30:51.949327 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:51Z","lastTransitionTime":"2026-01-31T16:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.052411 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.052477 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.052495 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.052597 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.052618 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:52Z","lastTransitionTime":"2026-01-31T16:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.155244 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.155299 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.155315 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.155335 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.155351 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:52Z","lastTransitionTime":"2026-01-31T16:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.258305 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.258355 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.258366 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.258385 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.258397 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:52Z","lastTransitionTime":"2026-01-31T16:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.361628 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.361675 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.361684 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.361699 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.361708 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:52Z","lastTransitionTime":"2026-01-31T16:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.465448 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.465482 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.465507 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.465523 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.465532 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:52Z","lastTransitionTime":"2026-01-31T16:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.569357 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.569409 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.569421 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.569445 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.569461 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:52Z","lastTransitionTime":"2026-01-31T16:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.673839 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.673919 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.673938 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.673968 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.673990 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:52Z","lastTransitionTime":"2026-01-31T16:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.708091 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:30:52 crc kubenswrapper[4769]: E0131 16:30:52.708318 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.708564 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.708672 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:30:52 crc kubenswrapper[4769]: E0131 16:30:52.708871 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:30:52 crc kubenswrapper[4769]: E0131 16:30:52.709037 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.732929 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21fa2db2-f448-487d-9ddb-ba4da28e8ffa\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0131 16:29:26.255718 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0131 16:29:26.257930 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3522701725/tls.crt::/tmp/serving-cert-3522701725/tls.key\\\\\\\"\\\\nI0131 16:29:32.222719 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0131 16:29:32.227365 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0131 16:29:32.227391 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0131 16:29:32.227414 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0131 16:29:32.227419 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0131 16:29:32.234321 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0131 16:29:32.234340 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0131 16:29:32.234349 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0131 16:29:32.234352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0131 16:29:32.234355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0131 16:29:32.234357 1 
secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0131 16:29:32.234523 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0131 16:29:32.237094 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:52Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.752538 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-18 02:38:44.507100981 +0000 UTC Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.753062 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"259af7b3-780a-4464-9596-a063fecea409\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cc4c0f2131eeb3ea03f5572e41e6c9f86ef15f5d6b70880be88ae6219eadecab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7027bbe350b6afd5a1563f64215b882ff809f1474dcce7a15a843756b7595233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a05b68fb33a04a94268458bbaa7f891e2d493dbe5a27d7e12ced0d561a70e938\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7cff45082b65da86bcca88d363692fdcaffbae44f33b5ae53a09b5aea7eff212\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7cff45082b65da86bcca88d363692fdcaffbae44f33b5ae53a09b5aea7eff212\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:52Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.770489 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79dca88fae791047a9091e1fcad57d33d0061bbb52395ed325e2658e5fd6ffc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:52Z is after 
2025-08-24T17:21:41Z" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.777914 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.777979 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.778002 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.778036 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.778056 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:52Z","lastTransitionTime":"2026-01-31T16:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.793366 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:52Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.813126 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef20199c8b7874b9d34bc38aed75d3d0dc669db3dfaff3218a1a4e249330daca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:52Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.832355 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-lw4fx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"97c97bcd-bd44-4fdf-a90a-2d6be88c23e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f603aa4ef9fa0a10f90946e243cd0d25dcd6dba58c86a4378085addf26d95233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6q9l7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:38Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-lw4fx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:52Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.857478 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6b28ca5-e4cc-4733-8fb3-f5bc2c30ee86\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0717f838e9dd04fe04218d3858edda9cd76359c43d888bef60f5c6d533d35ec1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1fd57645c8ddb97e6fb290f81195428e2bfd6b3af321e260cd4007cadc0fc496\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d282bca47e56b022765460bd4e2c34df8d15e19b52199a1cded767fada46ae2b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a52d14237bd46861c4f0ee3500a8cc3baf688f2
73b03262ada1439aaea53eb61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52ce0326d494aa79957bf95a8d09e34edec45f9d6145f648b035aed46693afa0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://60c6336ddc7116c2d4e7c6d7c9e95f19b1b4c3c752a291ab6cd798fcdb76d2ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://60c6336ddc7116c2d4e7c6d7c9e95f19b1b4c3c752a291ab6cd798fcdb76d2ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://502a1f0176c5e13455fc3ce0f6ca36385880f97689263220bc94494f4fb5e547\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://502a1f0176c5e13455fc3ce0f6ca36385880f97689263220bc94494f4fb5e547\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e529241eda20b31671e3674411aacf20711d7b27804d7978b48bcacaadf8a25d\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e529241eda20b31671e3674411aacf20711d7b27804d7978b48bcacaadf8a25d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:52Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.874625 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:52Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.880622 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.880894 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.881056 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.881210 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.881351 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:52Z","lastTransitionTime":"2026-01-31T16:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.894097 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e5e34357e35a0bda20351c442465d7866c927dfad1a6c8acf1dc32b52a326ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb37e108557965f63da49a443c6667212f8ab8eb0a7099a0a106c45e00e78e76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:52Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.912246 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-slrbh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"48d46c05-78b8-4355-9027-77efbbfbe87c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c68712921d6f140f1028a42cadcda30d38e3a9772ad3ca53e6d7b3b039433b40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hqcpx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-slrbh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:52Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.927456 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d851433b-1b86-4634-93e2-2c413b1c4f6d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a0acdb96d69965a7e3f18674c45f56b62ffa359a948133793971dd31fd8df1f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca9119b9d21d895d756b0210339f2b39516f7f4bd84d912e2dc63039f66b9ca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca9119b9d21d895d756b0210339f2b39516f7f4bd84d912e2dc63039f66b9ca8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:52Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.947449 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb8dd1c1-8ad0-4df1-9eb7-f7e36509abac\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8f8d35bedc1d437cf45b14a416b9e5ed1610d7ecd30603f9a179406839220a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9c6bf8a15857982b68a045b8ca1d407a63ad3da192375596f26389d9983cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1712a54b482696918a3ddb7294c16ce3676cb56c2928b721be12bcd390085e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f22b718ef3d2b16061b2c0fe48c8fe612b3e2b63baa2925fce7a1ad9552b090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:52Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.971547 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-g5kbw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a7cfe09-9892-494d-a420-5d720afb3df3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb4ceb742b812d9b282ba14b266b4a78550b5dc38d7637c07d1c95256799bc40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e1fb743faaf9717b716d96f7b36096ee11da25ac3eeeb601bdc6d8f20faf3a3f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-31T16:30:20Z\\\",\\\"message\\\":\\\"2026-01-31T16:29:35+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_7c3980f7-b7ed-4a09-9300-a99efa85ce97\\\\n2026-01-31T16:29:35+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_7c3980f7-b7ed-4a09-9300-a99efa85ce97 to /host/opt/cni/bin/\\\\n2026-01-31T16:29:35Z [verbose] multus-daemon started\\\\n2026-01-31T16:29:35Z [verbose] Readiness 
Indicator file check\\\\n2026-01-31T16:30:20Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m86wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-g5kbw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:52Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.985042 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.985105 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.985125 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.985152 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.985170 4769 setters.go:603] "Node became 
not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:52Z","lastTransitionTime":"2026-01-31T16:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:52 crc kubenswrapper[4769]: I0131 16:30:52.990583 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kvc58" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e39e875a-bea7-4e27-af9a-f769a493efe7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c9c0d071a489b7537159a33cfb524a36c03fb2b7dbb0bff1c4d966c26aa4a503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6shzp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a326f3c1d78ff42444724a0595281be656bfe3f92ff5716a089e7a6c6828d34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6shzp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\
\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kvc58\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:52Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.012282 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:53Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.036101 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rftqz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f9e971a-93ce-4a49-a970-a2789486d12c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f591a190eaf4b09cdb5961545cba8b3e469d2dd5b6b729627af238d6d8ebb410\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a15add21aeb6b5d2158b2212c1e20e7a8f07878b4e67ed1a9a4e45bf5f72b78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://055fc0dec6291c52dd242ded0959674881e194259be62e9e6b6b5bf89d278880\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edaad1d6cf3947e8802477f1512fc2591ab9343ddd0adbe2831973f0a3b74651\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ddfa0b565bf93932b355851f8a13f63ac13cf80afe5f9023013b5398b30de5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8760aefed217e4f815e1b5230096166da63cbbf3add78e2c8835436e3196d6a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a7d7784d6e2301b162007d62c5118c3638f25fdf3655b5fad9ad1925f3e0ef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mrkdd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rftqz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:53Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.052913 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1d352f75-43f7-4b8c-867e-cfb17bbbe011\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e57f0adc59ebcc50644648ee64395f6834328fec384b3d8c83b9758314ea18d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f48035545fd929a672be1a83a941b13f4b352bdb858af6a412efec46dc7ac217\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4w7km\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-4bqbm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:53Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.087092 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86f2019b-d6ca-4e73-9dac-52fe746489cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2f542bc61e702fd04e7f702af083e41309d9ba1
4e7edaf90b0e34a9b1ab7b53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2f542bc61e702fd04e7f702af083e41309d9ba14e7edaf90b0e34a9b1ab7b53\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-31T16:30:29Z\\\",\\\"message\\\":\\\"7.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:29Z is after 2025-08-24T17:21:41Z]\\\\nI0131 16:30:29.569813 6816 services_controller.go:452] Built service openshift-console/console per-node LB for network=default: []services.LB{}\\\\nI0131 16:30:29.570939 6816 transact.go:42] Configuring OVN: [{Op:update Table:Logical_Switch_Port Row:map[addresses:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]} options:{GoMap:map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-31T16:30:28Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-2r9tc_openshift-ovn-kubernetes(86f2019b-d6ca-4e73-9dac-52fe746489cb)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-31T16:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-31T16:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-31T16:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z9jb8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2r9tc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:53Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.089613 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.089855 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.090163 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.090352 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.090493 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:53Z","lastTransitionTime":"2026-01-31T16:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.108002 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-bl9cd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"428b0729-22d7-4feb-a392-1ec77e5acbc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-31T16:29:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzcbr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xzcbr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-31T16:29:47Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-bl9cd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-31T16:30:53Z is after 2025-08-24T17:21:41Z" Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.194182 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.194284 4769 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.194312 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.194386 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.194420 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:53Z","lastTransitionTime":"2026-01-31T16:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.298086 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.298172 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.298197 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.298235 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.298259 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:53Z","lastTransitionTime":"2026-01-31T16:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.402028 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.402250 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.402276 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.402307 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.402328 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:53Z","lastTransitionTime":"2026-01-31T16:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.506254 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.506337 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.506361 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.506394 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.506418 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:53Z","lastTransitionTime":"2026-01-31T16:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.610297 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.610350 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.610366 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.610392 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.610409 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:53Z","lastTransitionTime":"2026-01-31T16:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.708022 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:30:53 crc kubenswrapper[4769]: E0131 16:30:53.708154 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.713308 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.713392 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.713419 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.713624 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.713671 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:53Z","lastTransitionTime":"2026-01-31T16:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.753727 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-25 08:13:56.138763565 +0000 UTC Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.818190 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.818344 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.818371 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.818411 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.818440 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:53Z","lastTransitionTime":"2026-01-31T16:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.922913 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.922984 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.923001 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.923031 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:53 crc kubenswrapper[4769]: I0131 16:30:53.923054 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:53Z","lastTransitionTime":"2026-01-31T16:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.026057 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.026115 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.026127 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.026146 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.026184 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:54Z","lastTransitionTime":"2026-01-31T16:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.129124 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.129190 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.129204 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.129223 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.129235 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:54Z","lastTransitionTime":"2026-01-31T16:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.232744 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.232802 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.232815 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.232832 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.232843 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:54Z","lastTransitionTime":"2026-01-31T16:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.335880 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.335952 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.335972 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.335999 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.336019 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:54Z","lastTransitionTime":"2026-01-31T16:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.439515 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.439552 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.439564 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.439583 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.439595 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:54Z","lastTransitionTime":"2026-01-31T16:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.543118 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.543186 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.543206 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.543232 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.543251 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:54Z","lastTransitionTime":"2026-01-31T16:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.646043 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.646082 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.646095 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.646111 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.646126 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:54Z","lastTransitionTime":"2026-01-31T16:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.708179 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.708218 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.708222 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:30:54 crc kubenswrapper[4769]: E0131 16:30:54.708356 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:30:54 crc kubenswrapper[4769]: E0131 16:30:54.708640 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:30:54 crc kubenswrapper[4769]: E0131 16:30:54.708715 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.748819 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.748877 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.748896 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.748922 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.748940 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:54Z","lastTransitionTime":"2026-01-31T16:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.754185 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-26 02:35:07.271552298 +0000 UTC Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.852139 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.852179 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.852192 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.852209 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.852221 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:54Z","lastTransitionTime":"2026-01-31T16:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.955301 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.955347 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.955361 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.955379 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:54 crc kubenswrapper[4769]: I0131 16:30:54.955395 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:54Z","lastTransitionTime":"2026-01-31T16:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.058042 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.058097 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.058114 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.058138 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.058155 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:55Z","lastTransitionTime":"2026-01-31T16:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.160580 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.160611 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.160621 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.160633 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.160644 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:55Z","lastTransitionTime":"2026-01-31T16:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.263001 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.263079 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.263106 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.263138 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.263161 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:55Z","lastTransitionTime":"2026-01-31T16:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.365699 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.365737 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.365746 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.365767 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.365780 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:55Z","lastTransitionTime":"2026-01-31T16:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.468840 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.468883 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.468895 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.468908 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.468918 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:55Z","lastTransitionTime":"2026-01-31T16:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.572451 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.572595 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.572624 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.572664 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.572689 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:55Z","lastTransitionTime":"2026-01-31T16:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.677592 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.677660 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.677677 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.677702 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.677720 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:55Z","lastTransitionTime":"2026-01-31T16:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.708203 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:30:55 crc kubenswrapper[4769]: E0131 16:30:55.708611 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.754931 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-05 14:33:35.197149259 +0000 UTC Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.781040 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.781136 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.781207 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.781360 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.781389 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:55Z","lastTransitionTime":"2026-01-31T16:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.884236 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.884275 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.884283 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.884304 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.884315 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:55Z","lastTransitionTime":"2026-01-31T16:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.987704 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.987796 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.987822 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.987858 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:55 crc kubenswrapper[4769]: I0131 16:30:55.987882 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:55Z","lastTransitionTime":"2026-01-31T16:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.091759 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.091841 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.091867 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.091901 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.091928 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:56Z","lastTransitionTime":"2026-01-31T16:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.196131 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.196198 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.196216 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.196246 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.196268 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:56Z","lastTransitionTime":"2026-01-31T16:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.304405 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.304485 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.304766 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.304832 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.304854 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:56Z","lastTransitionTime":"2026-01-31T16:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.408677 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.408769 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.408791 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.408821 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.408844 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:56Z","lastTransitionTime":"2026-01-31T16:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.512106 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.512250 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.512280 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.512316 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.512341 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:56Z","lastTransitionTime":"2026-01-31T16:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.615405 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.615563 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.615586 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.615611 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.615632 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:56Z","lastTransitionTime":"2026-01-31T16:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.708102 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.708192 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.708643 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:30:56 crc kubenswrapper[4769]: E0131 16:30:56.708798 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:30:56 crc kubenswrapper[4769]: E0131 16:30:56.708809 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:30:56 crc kubenswrapper[4769]: E0131 16:30:56.709099 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.718266 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.718315 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.718326 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.718342 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.718354 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:56Z","lastTransitionTime":"2026-01-31T16:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.755907 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-06 03:39:31.670926227 +0000 UTC Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.822797 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.823716 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.823767 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.823805 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.823833 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:56Z","lastTransitionTime":"2026-01-31T16:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.928050 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.928131 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.928152 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.928185 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:56 crc kubenswrapper[4769]: I0131 16:30:56.928206 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:56Z","lastTransitionTime":"2026-01-31T16:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.032133 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.032218 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.032235 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.032262 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.032280 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:57Z","lastTransitionTime":"2026-01-31T16:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.135835 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.135894 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.135908 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.135929 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.135942 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:57Z","lastTransitionTime":"2026-01-31T16:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.240878 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.240952 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.240970 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.240996 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.241014 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:57Z","lastTransitionTime":"2026-01-31T16:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.345037 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.345104 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.345127 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.345159 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.345180 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:57Z","lastTransitionTime":"2026-01-31T16:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.450048 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.450097 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.450137 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.450160 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.450176 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:57Z","lastTransitionTime":"2026-01-31T16:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.553235 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.553288 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.553304 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.553323 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.553337 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:57Z","lastTransitionTime":"2026-01-31T16:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.656658 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.656723 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.656739 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.656765 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.656784 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:57Z","lastTransitionTime":"2026-01-31T16:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.707947 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:30:57 crc kubenswrapper[4769]: E0131 16:30:57.708661 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.709168 4769 scope.go:117] "RemoveContainer" containerID="a2f542bc61e702fd04e7f702af083e41309d9ba14e7edaf90b0e34a9b1ab7b53" Jan 31 16:30:57 crc kubenswrapper[4769]: E0131 16:30:57.709649 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-2r9tc_openshift-ovn-kubernetes(86f2019b-d6ca-4e73-9dac-52fe746489cb)\"" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.756608 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-22 16:41:05.266805394 +0000 UTC Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.760064 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.760141 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.760168 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.760199 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.760221 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:57Z","lastTransitionTime":"2026-01-31T16:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.863318 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.863378 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.863398 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.863425 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.863447 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:57Z","lastTransitionTime":"2026-01-31T16:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.967590 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.967656 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.967666 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.967695 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:57 crc kubenswrapper[4769]: I0131 16:30:57.967716 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:57Z","lastTransitionTime":"2026-01-31T16:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.072238 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.072324 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.072348 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.072380 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.072404 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:58Z","lastTransitionTime":"2026-01-31T16:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.175855 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.175947 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.175974 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.176011 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.176036 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:58Z","lastTransitionTime":"2026-01-31T16:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.279811 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.279890 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.279907 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.279931 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.279946 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:58Z","lastTransitionTime":"2026-01-31T16:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.382725 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.382802 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.382820 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.382849 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.382868 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:58Z","lastTransitionTime":"2026-01-31T16:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.486719 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.486790 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.486808 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.486838 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.486858 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:58Z","lastTransitionTime":"2026-01-31T16:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.590119 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.590190 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.590207 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.590234 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.590256 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:58Z","lastTransitionTime":"2026-01-31T16:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.693603 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.693666 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.693686 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.700085 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.700180 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:58Z","lastTransitionTime":"2026-01-31T16:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.708101 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.708159 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.708131 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:30:58 crc kubenswrapper[4769]: E0131 16:30:58.708290 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:30:58 crc kubenswrapper[4769]: E0131 16:30:58.708407 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:30:58 crc kubenswrapper[4769]: E0131 16:30:58.708659 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.757243 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-06 20:43:27.565813925 +0000 UTC Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.805097 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.805133 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.805149 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.805173 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.805190 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:58Z","lastTransitionTime":"2026-01-31T16:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.908817 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.908883 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.908902 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.908930 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:58 crc kubenswrapper[4769]: I0131 16:30:58.908954 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:58Z","lastTransitionTime":"2026-01-31T16:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.014594 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.014673 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.014692 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.014726 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.014755 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:59Z","lastTransitionTime":"2026-01-31T16:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.118736 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.118830 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.118850 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.118879 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.118899 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:59Z","lastTransitionTime":"2026-01-31T16:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.222576 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.222620 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.222629 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.222644 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.222653 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:59Z","lastTransitionTime":"2026-01-31T16:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.326457 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.326559 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.326584 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.326615 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.326635 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:59Z","lastTransitionTime":"2026-01-31T16:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.430660 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.430734 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.430756 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.430790 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.430817 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:59Z","lastTransitionTime":"2026-01-31T16:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.535010 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.535115 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.535139 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.535166 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.535184 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:59Z","lastTransitionTime":"2026-01-31T16:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.638711 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.638784 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.638805 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.638836 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.638858 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:59Z","lastTransitionTime":"2026-01-31T16:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.708103 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:30:59 crc kubenswrapper[4769]: E0131 16:30:59.709009 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.744356 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.744423 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.744443 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.744477 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.744522 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:59Z","lastTransitionTime":"2026-01-31T16:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.757824 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-07 02:24:02.024452002 +0000 UTC Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.848312 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.848383 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.848404 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.848435 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.848458 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:59Z","lastTransitionTime":"2026-01-31T16:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.952707 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.952766 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.952784 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.952815 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:30:59 crc kubenswrapper[4769]: I0131 16:30:59.952838 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:30:59Z","lastTransitionTime":"2026-01-31T16:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.057543 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.057633 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.057654 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.057690 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.057722 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:31:00Z","lastTransitionTime":"2026-01-31T16:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.161590 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.161677 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.161734 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.161769 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.161794 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:31:00Z","lastTransitionTime":"2026-01-31T16:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.266558 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.266626 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.266644 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.266674 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.266700 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:31:00Z","lastTransitionTime":"2026-01-31T16:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.370028 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.370622 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.370645 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.370673 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.370696 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:31:00Z","lastTransitionTime":"2026-01-31T16:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.474426 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.474491 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.474537 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.474567 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.474590 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:31:00Z","lastTransitionTime":"2026-01-31T16:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.578442 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.578549 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.578572 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.578632 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.578655 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:31:00Z","lastTransitionTime":"2026-01-31T16:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.682224 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.682375 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.682397 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.682424 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.682443 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:31:00Z","lastTransitionTime":"2026-01-31T16:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.707755 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.707844 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:31:00 crc kubenswrapper[4769]: E0131 16:31:00.708022 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.708094 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:31:00 crc kubenswrapper[4769]: E0131 16:31:00.708575 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:31:00 crc kubenswrapper[4769]: E0131 16:31:00.708726 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.758818 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-15 11:55:31.220734092 +0000 UTC Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.785863 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.785961 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.785977 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.785999 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.786016 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:31:00Z","lastTransitionTime":"2026-01-31T16:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.848412 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.848566 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.848594 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.848633 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.848653 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-31T16:31:00Z","lastTransitionTime":"2026-01-31T16:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.930884 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-zhl76"] Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.932035 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-zhl76" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.936183 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.936210 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.937804 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.938291 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.953734 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/61333e43-04c6-40cf-b19c-e190e7ec293b-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-zhl76\" (UID: \"61333e43-04c6-40cf-b19c-e190e7ec293b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-zhl76" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.953808 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/61333e43-04c6-40cf-b19c-e190e7ec293b-service-ca\") pod \"cluster-version-operator-5c965bbfc6-zhl76\" (UID: \"61333e43-04c6-40cf-b19c-e190e7ec293b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-zhl76" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.953836 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: 
\"kubernetes.io/projected/61333e43-04c6-40cf-b19c-e190e7ec293b-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-zhl76\" (UID: \"61333e43-04c6-40cf-b19c-e190e7ec293b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-zhl76" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.953853 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/61333e43-04c6-40cf-b19c-e190e7ec293b-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-zhl76\" (UID: \"61333e43-04c6-40cf-b19c-e190e7ec293b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-zhl76" Jan 31 16:31:00 crc kubenswrapper[4769]: I0131 16:31:00.953887 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/61333e43-04c6-40cf-b19c-e190e7ec293b-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-zhl76\" (UID: \"61333e43-04c6-40cf-b19c-e190e7ec293b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-zhl76" Jan 31 16:31:01 crc kubenswrapper[4769]: I0131 16:31:01.000276 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=23.000241683 podStartE2EDuration="23.000241683s" podCreationTimestamp="2026-01-31 16:30:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:00.978278471 +0000 UTC m=+109.052447160" watchObservedRunningTime="2026-01-31 16:31:01.000241683 +0000 UTC m=+109.074410392" Jan 31 16:31:01 crc kubenswrapper[4769]: I0131 16:31:01.040613 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-slrbh" podStartSLOduration=88.040578544 podStartE2EDuration="1m28.040578544s" podCreationTimestamp="2026-01-31 16:29:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:01.039813463 +0000 UTC m=+109.113982192" watchObservedRunningTime="2026-01-31 16:31:01.040578544 +0000 UTC m=+109.114747243" Jan 31 16:31:01 crc kubenswrapper[4769]: I0131 16:31:01.054849 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/61333e43-04c6-40cf-b19c-e190e7ec293b-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-zhl76\" (UID: \"61333e43-04c6-40cf-b19c-e190e7ec293b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-zhl76" Jan 31 16:31:01 crc kubenswrapper[4769]: I0131 16:31:01.054915 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/61333e43-04c6-40cf-b19c-e190e7ec293b-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-zhl76\" (UID: \"61333e43-04c6-40cf-b19c-e190e7ec293b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-zhl76" Jan 31 16:31:01 crc kubenswrapper[4769]: I0131 16:31:01.054965 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/61333e43-04c6-40cf-b19c-e190e7ec293b-service-ca\") pod \"cluster-version-operator-5c965bbfc6-zhl76\" (UID: \"61333e43-04c6-40cf-b19c-e190e7ec293b\") " 
pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-zhl76" Jan 31 16:31:01 crc kubenswrapper[4769]: I0131 16:31:01.054986 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/61333e43-04c6-40cf-b19c-e190e7ec293b-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-zhl76\" (UID: \"61333e43-04c6-40cf-b19c-e190e7ec293b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-zhl76" Jan 31 16:31:01 crc kubenswrapper[4769]: I0131 16:31:01.055007 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/61333e43-04c6-40cf-b19c-e190e7ec293b-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-zhl76\" (UID: \"61333e43-04c6-40cf-b19c-e190e7ec293b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-zhl76" Jan 31 16:31:01 crc kubenswrapper[4769]: I0131 16:31:01.055100 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/61333e43-04c6-40cf-b19c-e190e7ec293b-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-zhl76\" (UID: \"61333e43-04c6-40cf-b19c-e190e7ec293b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-zhl76" Jan 31 16:31:01 crc kubenswrapper[4769]: I0131 16:31:01.055792 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/61333e43-04c6-40cf-b19c-e190e7ec293b-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-zhl76\" (UID: \"61333e43-04c6-40cf-b19c-e190e7ec293b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-zhl76" Jan 31 16:31:01 crc kubenswrapper[4769]: I0131 16:31:01.058259 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/61333e43-04c6-40cf-b19c-e190e7ec293b-service-ca\") pod \"cluster-version-operator-5c965bbfc6-zhl76\" (UID: \"61333e43-04c6-40cf-b19c-e190e7ec293b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-zhl76" Jan 31 16:31:01 crc kubenswrapper[4769]: I0131 16:31:01.069440 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/61333e43-04c6-40cf-b19c-e190e7ec293b-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-zhl76\" (UID: \"61333e43-04c6-40cf-b19c-e190e7ec293b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-zhl76" Jan 31 16:31:01 crc kubenswrapper[4769]: I0131 16:31:01.111455 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=30.11143149 podStartE2EDuration="30.11143149s" podCreationTimestamp="2026-01-31 16:30:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:01.089107288 +0000 UTC m=+109.163275997" watchObservedRunningTime="2026-01-31 16:31:01.11143149 +0000 UTC m=+109.185600159" Jan 31 16:31:01 crc kubenswrapper[4769]: I0131 16:31:01.116933 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/61333e43-04c6-40cf-b19c-e190e7ec293b-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-zhl76\" (UID: 
\"61333e43-04c6-40cf-b19c-e190e7ec293b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-zhl76" Jan 31 16:31:01 crc kubenswrapper[4769]: I0131 16:31:01.136387 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=86.136348156 podStartE2EDuration="1m26.136348156s" podCreationTimestamp="2026-01-31 16:29:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:01.111837572 +0000 UTC m=+109.186006301" watchObservedRunningTime="2026-01-31 16:31:01.136348156 +0000 UTC m=+109.210516855" Jan 31 16:31:01 crc kubenswrapper[4769]: I0131 16:31:01.151388 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-g5kbw" podStartSLOduration=88.151362351 podStartE2EDuration="1m28.151362351s" podCreationTimestamp="2026-01-31 16:29:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:01.13754142 +0000 UTC m=+109.211710129" watchObservedRunningTime="2026-01-31 16:31:01.151362351 +0000 UTC m=+109.225531020" Jan 31 16:31:01 crc kubenswrapper[4769]: I0131 16:31:01.151668 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kvc58" podStartSLOduration=87.1516639 podStartE2EDuration="1m27.1516639s" podCreationTimestamp="2026-01-31 16:29:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:01.151539536 +0000 UTC m=+109.225708245" watchObservedRunningTime="2026-01-31 16:31:01.1516639 +0000 UTC m=+109.225832569" Jan 31 16:31:01 crc kubenswrapper[4769]: I0131 16:31:01.187975 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-rftqz" podStartSLOduration=88.187817863 podStartE2EDuration="1m28.187817863s" podCreationTimestamp="2026-01-31 16:29:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:01.186413074 +0000 UTC m=+109.260581733" watchObservedRunningTime="2026-01-31 16:31:01.187817863 +0000 UTC m=+109.261986572" Jan 31 16:31:01 crc kubenswrapper[4769]: I0131 16:31:01.233290 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podStartSLOduration=88.23326418 podStartE2EDuration="1m28.23326418s" podCreationTimestamp="2026-01-31 16:29:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:01.206581954 +0000 UTC m=+109.280750623" watchObservedRunningTime="2026-01-31 16:31:01.23326418 +0000 UTC m=+109.307432849" Jan 31 16:31:01 crc kubenswrapper[4769]: I0131 16:31:01.255519 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-zhl76" Jan 31 16:31:01 crc kubenswrapper[4769]: I0131 16:31:01.306666 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=62.306644037 podStartE2EDuration="1m2.306644037s" podCreationTimestamp="2026-01-31 16:29:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:01.305417053 +0000 UTC m=+109.379585712" watchObservedRunningTime="2026-01-31 16:31:01.306644037 +0000 UTC m=+109.380812706" Jan 31 16:31:01 crc kubenswrapper[4769]: I0131 16:31:01.306899 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=89.306891614 podStartE2EDuration="1m29.306891614s" podCreationTimestamp="2026-01-31 16:29:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:01.288793032 +0000 UTC m=+109.362961701" watchObservedRunningTime="2026-01-31 16:31:01.306891614 +0000 UTC m=+109.381060283" Jan 31 16:31:01 crc kubenswrapper[4769]: I0131 16:31:01.325835 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-zhl76" event={"ID":"61333e43-04c6-40cf-b19c-e190e7ec293b","Type":"ContainerStarted","Data":"aeff4c0c13b6d013d0fcdc2d7325efb2a4354d066b40ddba86e9a4534874ff4b"} Jan 31 16:31:01 crc kubenswrapper[4769]: I0131 16:31:01.707997 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:31:01 crc kubenswrapper[4769]: E0131 16:31:01.708181 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:31:01 crc kubenswrapper[4769]: I0131 16:31:01.759563 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-31 01:50:43.602827388 +0000 UTC Jan 31 16:31:01 crc kubenswrapper[4769]: I0131 16:31:01.759653 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Rotating certificates Jan 31 16:31:01 crc kubenswrapper[4769]: I0131 16:31:01.767897 4769 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146 Jan 31 16:31:02 crc kubenswrapper[4769]: I0131 16:31:02.332111 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-zhl76" event={"ID":"61333e43-04c6-40cf-b19c-e190e7ec293b","Type":"ContainerStarted","Data":"4931466ba2021f5f642ece82c7d19b1481c03a17b5650a401cc828d463d02c36"} Jan 31 16:31:02 crc kubenswrapper[4769]: I0131 16:31:02.359303 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-lw4fx" podStartSLOduration=88.359272496 podStartE2EDuration="1m28.359272496s" podCreationTimestamp="2026-01-31 16:29:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:01.391268663 +0000 UTC m=+109.465437332" watchObservedRunningTime="2026-01-31 16:31:02.359272496 +0000 UTC m=+110.433441205" Jan 31 16:31:02 crc kubenswrapper[4769]: I0131 16:31:02.359475 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-zhl76" podStartSLOduration=89.359466371 podStartE2EDuration="1m29.359466371s" podCreationTimestamp="2026-01-31 16:29:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:02.356676352 +0000 UTC m=+110.430845031" watchObservedRunningTime="2026-01-31 16:31:02.359466371 +0000 UTC m=+110.433635070" Jan 31 16:31:02 crc kubenswrapper[4769]: I0131 16:31:02.707456 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:31:02 crc kubenswrapper[4769]: I0131 16:31:02.707618 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:31:02 crc kubenswrapper[4769]: I0131 16:31:02.707641 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:31:02 crc kubenswrapper[4769]: E0131 16:31:02.708621 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:31:02 crc kubenswrapper[4769]: E0131 16:31:02.708929 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:31:02 crc kubenswrapper[4769]: E0131 16:31:02.709028 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:31:03 crc kubenswrapper[4769]: I0131 16:31:03.707071 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:31:03 crc kubenswrapper[4769]: E0131 16:31:03.707179 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:31:04 crc kubenswrapper[4769]: I0131 16:31:04.707842 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:31:04 crc kubenswrapper[4769]: I0131 16:31:04.707929 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:31:04 crc kubenswrapper[4769]: I0131 16:31:04.707845 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:31:04 crc kubenswrapper[4769]: E0131 16:31:04.708241 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:31:04 crc kubenswrapper[4769]: E0131 16:31:04.708297 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:31:04 crc kubenswrapper[4769]: E0131 16:31:04.708488 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:31:05 crc kubenswrapper[4769]: I0131 16:31:05.707770 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:31:05 crc kubenswrapper[4769]: E0131 16:31:05.708341 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:31:06 crc kubenswrapper[4769]: I0131 16:31:06.707933 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:31:06 crc kubenswrapper[4769]: I0131 16:31:06.708121 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:31:06 crc kubenswrapper[4769]: E0131 16:31:06.708198 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:31:06 crc kubenswrapper[4769]: I0131 16:31:06.707954 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:31:06 crc kubenswrapper[4769]: E0131 16:31:06.708351 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:31:06 crc kubenswrapper[4769]: E0131 16:31:06.708475 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:31:07 crc kubenswrapper[4769]: I0131 16:31:07.358111 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-g5kbw_4a7cfe09-9892-494d-a420-5d720afb3df3/kube-multus/1.log" Jan 31 16:31:07 crc kubenswrapper[4769]: I0131 16:31:07.358914 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-g5kbw_4a7cfe09-9892-494d-a420-5d720afb3df3/kube-multus/0.log" Jan 31 16:31:07 crc kubenswrapper[4769]: I0131 16:31:07.359000 4769 generic.go:334] "Generic (PLEG): container finished" podID="4a7cfe09-9892-494d-a420-5d720afb3df3" containerID="eb4ceb742b812d9b282ba14b266b4a78550b5dc38d7637c07d1c95256799bc40" exitCode=1 Jan 31 16:31:07 crc kubenswrapper[4769]: I0131 16:31:07.359058 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-g5kbw" event={"ID":"4a7cfe09-9892-494d-a420-5d720afb3df3","Type":"ContainerDied","Data":"eb4ceb742b812d9b282ba14b266b4a78550b5dc38d7637c07d1c95256799bc40"} Jan 31 16:31:07 crc kubenswrapper[4769]: I0131 16:31:07.359138 4769 scope.go:117] "RemoveContainer" containerID="e1fb743faaf9717b716d96f7b36096ee11da25ac3eeeb601bdc6d8f20faf3a3f" Jan 31 16:31:07 crc kubenswrapper[4769]: I0131 16:31:07.359950 4769 scope.go:117] "RemoveContainer" containerID="eb4ceb742b812d9b282ba14b266b4a78550b5dc38d7637c07d1c95256799bc40" Jan 31 16:31:07 crc kubenswrapper[4769]: E0131 16:31:07.360282 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-g5kbw_openshift-multus(4a7cfe09-9892-494d-a420-5d720afb3df3)\"" pod="openshift-multus/multus-g5kbw" podUID="4a7cfe09-9892-494d-a420-5d720afb3df3" Jan 31 16:31:07 crc kubenswrapper[4769]: I0131 16:31:07.707245 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:31:07 crc kubenswrapper[4769]: E0131 16:31:07.707528 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:31:08 crc kubenswrapper[4769]: I0131 16:31:08.367131 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-g5kbw_4a7cfe09-9892-494d-a420-5d720afb3df3/kube-multus/1.log" Jan 31 16:31:08 crc kubenswrapper[4769]: I0131 16:31:08.708111 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:31:08 crc kubenswrapper[4769]: I0131 16:31:08.708177 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:31:08 crc kubenswrapper[4769]: I0131 16:31:08.708363 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:31:08 crc kubenswrapper[4769]: E0131 16:31:08.708548 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:31:08 crc kubenswrapper[4769]: E0131 16:31:08.708745 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:31:08 crc kubenswrapper[4769]: E0131 16:31:08.708979 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:31:09 crc kubenswrapper[4769]: I0131 16:31:09.708094 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:31:09 crc kubenswrapper[4769]: E0131 16:31:09.708314 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:31:10 crc kubenswrapper[4769]: I0131 16:31:10.707438 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:31:10 crc kubenswrapper[4769]: E0131 16:31:10.707712 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:31:10 crc kubenswrapper[4769]: I0131 16:31:10.707769 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:31:10 crc kubenswrapper[4769]: I0131 16:31:10.707929 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:31:10 crc kubenswrapper[4769]: E0131 16:31:10.707970 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:31:10 crc kubenswrapper[4769]: E0131 16:31:10.708141 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:31:11 crc kubenswrapper[4769]: I0131 16:31:11.707120 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:31:11 crc kubenswrapper[4769]: E0131 16:31:11.707670 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:31:12 crc kubenswrapper[4769]: I0131 16:31:12.707841 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:31:12 crc kubenswrapper[4769]: I0131 16:31:12.707947 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:31:12 crc kubenswrapper[4769]: E0131 16:31:12.709875 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:31:12 crc kubenswrapper[4769]: I0131 16:31:12.709992 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:31:12 crc kubenswrapper[4769]: E0131 16:31:12.710150 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:31:12 crc kubenswrapper[4769]: E0131 16:31:12.710259 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:31:12 crc kubenswrapper[4769]: I0131 16:31:12.711060 4769 scope.go:117] "RemoveContainer" containerID="a2f542bc61e702fd04e7f702af083e41309d9ba14e7edaf90b0e34a9b1ab7b53" Jan 31 16:31:12 crc kubenswrapper[4769]: E0131 16:31:12.741407 4769 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Jan 31 16:31:12 crc kubenswrapper[4769]: E0131 16:31:12.828460 4769 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 31 16:31:13 crc kubenswrapper[4769]: I0131 16:31:13.389392 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2r9tc_86f2019b-d6ca-4e73-9dac-52fe746489cb/ovnkube-controller/3.log" Jan 31 16:31:13 crc kubenswrapper[4769]: I0131 16:31:13.392015 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" event={"ID":"86f2019b-d6ca-4e73-9dac-52fe746489cb","Type":"ContainerStarted","Data":"5a289c283ed37ae1109445a2935ece7ff8211c5453a1f152157da1236035f205"} Jan 31 16:31:13 crc kubenswrapper[4769]: I0131 16:31:13.393187 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:31:13 crc kubenswrapper[4769]: I0131 16:31:13.445026 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" podStartSLOduration=100.445000081 podStartE2EDuration="1m40.445000081s" podCreationTimestamp="2026-01-31 16:29:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:13.439844805 +0000 UTC m=+121.514013474" watchObservedRunningTime="2026-01-31 16:31:13.445000081 +0000 UTC m=+121.519168750" Jan 31 16:31:13 crc kubenswrapper[4769]: I0131 16:31:13.708009 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:31:13 crc kubenswrapper[4769]: E0131 16:31:13.708152 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:31:13 crc kubenswrapper[4769]: I0131 16:31:13.825601 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-bl9cd"] Jan 31 16:31:13 crc kubenswrapper[4769]: I0131 16:31:13.825873 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:31:13 crc kubenswrapper[4769]: E0131 16:31:13.826095 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:31:14 crc kubenswrapper[4769]: I0131 16:31:14.707678 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:31:14 crc kubenswrapper[4769]: E0131 16:31:14.707935 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:31:14 crc kubenswrapper[4769]: I0131 16:31:14.708354 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:31:14 crc kubenswrapper[4769]: E0131 16:31:14.708466 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:31:15 crc kubenswrapper[4769]: I0131 16:31:15.707820 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:31:15 crc kubenswrapper[4769]: I0131 16:31:15.707890 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:31:15 crc kubenswrapper[4769]: E0131 16:31:15.708097 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:31:15 crc kubenswrapper[4769]: E0131 16:31:15.708314 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:31:16 crc kubenswrapper[4769]: I0131 16:31:16.707746 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:31:16 crc kubenswrapper[4769]: I0131 16:31:16.707814 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:31:16 crc kubenswrapper[4769]: E0131 16:31:16.708090 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:31:16 crc kubenswrapper[4769]: E0131 16:31:16.708208 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:31:17 crc kubenswrapper[4769]: I0131 16:31:17.708256 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:31:17 crc kubenswrapper[4769]: I0131 16:31:17.708257 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:31:17 crc kubenswrapper[4769]: E0131 16:31:17.708563 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:31:17 crc kubenswrapper[4769]: E0131 16:31:17.708653 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:31:17 crc kubenswrapper[4769]: E0131 16:31:17.830565 4769 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 31 16:31:18 crc kubenswrapper[4769]: I0131 16:31:18.707911 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:31:18 crc kubenswrapper[4769]: I0131 16:31:18.707924 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:31:18 crc kubenswrapper[4769]: E0131 16:31:18.708150 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:31:18 crc kubenswrapper[4769]: E0131 16:31:18.708256 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:31:19 crc kubenswrapper[4769]: I0131 16:31:19.708071 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:31:19 crc kubenswrapper[4769]: I0131 16:31:19.708170 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:31:19 crc kubenswrapper[4769]: E0131 16:31:19.708241 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:31:19 crc kubenswrapper[4769]: E0131 16:31:19.708398 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:31:20 crc kubenswrapper[4769]: I0131 16:31:20.707862 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:31:20 crc kubenswrapper[4769]: I0131 16:31:20.707883 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:31:20 crc kubenswrapper[4769]: E0131 16:31:20.708064 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:31:20 crc kubenswrapper[4769]: E0131 16:31:20.708147 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:31:21 crc kubenswrapper[4769]: I0131 16:31:21.707769 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:31:21 crc kubenswrapper[4769]: I0131 16:31:21.707773 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:31:21 crc kubenswrapper[4769]: E0131 16:31:21.707943 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:31:21 crc kubenswrapper[4769]: E0131 16:31:21.708100 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:31:22 crc kubenswrapper[4769]: I0131 16:31:22.707242 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:31:22 crc kubenswrapper[4769]: I0131 16:31:22.709253 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:31:22 crc kubenswrapper[4769]: E0131 16:31:22.709652 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:31:22 crc kubenswrapper[4769]: E0131 16:31:22.709752 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:31:22 crc kubenswrapper[4769]: I0131 16:31:22.709783 4769 scope.go:117] "RemoveContainer" containerID="eb4ceb742b812d9b282ba14b266b4a78550b5dc38d7637c07d1c95256799bc40" Jan 31 16:31:22 crc kubenswrapper[4769]: E0131 16:31:22.831391 4769 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 31 16:31:23 crc kubenswrapper[4769]: I0131 16:31:23.434849 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-g5kbw_4a7cfe09-9892-494d-a420-5d720afb3df3/kube-multus/1.log" Jan 31 16:31:23 crc kubenswrapper[4769]: I0131 16:31:23.434941 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-g5kbw" event={"ID":"4a7cfe09-9892-494d-a420-5d720afb3df3","Type":"ContainerStarted","Data":"4d02b7b52d4a04cc3175863e1021a77566a5ea07c3c2035a027d8f00b49ec612"} Jan 31 16:31:23 crc kubenswrapper[4769]: I0131 16:31:23.707886 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:31:23 crc kubenswrapper[4769]: I0131 16:31:23.707937 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:31:23 crc kubenswrapper[4769]: E0131 16:31:23.708051 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:31:23 crc kubenswrapper[4769]: E0131 16:31:23.708161 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:31:24 crc kubenswrapper[4769]: I0131 16:31:24.707160 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:31:24 crc kubenswrapper[4769]: E0131 16:31:24.707301 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:31:24 crc kubenswrapper[4769]: I0131 16:31:24.707160 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:31:24 crc kubenswrapper[4769]: E0131 16:31:24.707579 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:31:25 crc kubenswrapper[4769]: I0131 16:31:25.707453 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:31:25 crc kubenswrapper[4769]: I0131 16:31:25.707552 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:31:25 crc kubenswrapper[4769]: E0131 16:31:25.707640 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:31:25 crc kubenswrapper[4769]: E0131 16:31:25.707768 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:31:26 crc kubenswrapper[4769]: I0131 16:31:26.708055 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:31:26 crc kubenswrapper[4769]: E0131 16:31:26.708263 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 31 16:31:26 crc kubenswrapper[4769]: I0131 16:31:26.708091 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:31:26 crc kubenswrapper[4769]: E0131 16:31:26.708633 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 31 16:31:27 crc kubenswrapper[4769]: I0131 16:31:27.708920 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:31:27 crc kubenswrapper[4769]: I0131 16:31:27.709011 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:31:27 crc kubenswrapper[4769]: E0131 16:31:27.709165 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 31 16:31:27 crc kubenswrapper[4769]: E0131 16:31:27.709302 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-bl9cd" podUID="428b0729-22d7-4feb-a392-1ec77e5acbc0" Jan 31 16:31:28 crc kubenswrapper[4769]: I0131 16:31:28.707474 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:31:28 crc kubenswrapper[4769]: I0131 16:31:28.707781 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:31:28 crc kubenswrapper[4769]: I0131 16:31:28.709556 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Jan 31 16:31:28 crc kubenswrapper[4769]: I0131 16:31:28.709967 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Jan 31 16:31:28 crc kubenswrapper[4769]: I0131 16:31:28.710315 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Jan 31 16:31:28 crc kubenswrapper[4769]: I0131 16:31:28.710318 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Jan 31 16:31:29 crc kubenswrapper[4769]: I0131 16:31:29.707185 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:31:29 crc kubenswrapper[4769]: I0131 16:31:29.707185 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:31:29 crc kubenswrapper[4769]: I0131 16:31:29.710395 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Jan 31 16:31:29 crc kubenswrapper[4769]: I0131 16:31:29.710460 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Jan 31 16:31:29 crc kubenswrapper[4769]: I0131 16:31:29.869930 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.482398 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.531886 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-pwzwp"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.532820 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-pwzwp" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.533690 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-qt6ps"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.543097 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.543107 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-qt6ps" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.543468 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-wlgn6"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.545169 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlgn6" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.555553 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.556161 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.556280 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.556432 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.560801 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-79bp9"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.561380 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-v4t65"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.561762 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-v4t65" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.561831 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-79bp9" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.562027 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.562028 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.562773 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.562828 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.563008 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.563166 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.563205 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.563283 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.563433 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.563766 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.563871 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.564018 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.563881 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.564147 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.568004 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.569010 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.569387 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-79bp9"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.571122 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-v4t65"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.572980 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-qt6ps"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.576851 4769 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-pwzwp"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.579902 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.581680 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.581762 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.581936 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.582162 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.582246 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.582357 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.582431 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.581682 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.582610 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.582673 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.582723 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.582838 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.582863 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.584023 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-8td6v"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.584086 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.584097 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.584536 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8td6v" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.585736 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.589569 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.589816 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.589986 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.590091 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.590319 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.590414 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-v5mpd"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.590956 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-v5mpd" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.592928 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-cvn6z"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.593365 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cvn6z" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.596615 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.596724 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-h8nkx"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.597660 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.599339 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.599569 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.602732 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-vdcnf"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.603865 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-m58s7"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.604435 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hhtgl"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.605316 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hhtgl" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.605944 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.608138 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-ds894"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.608570 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-m58s7" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.608700 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-ds894" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.610010 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-sppfg"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.618701 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sppfg" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.630725 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f1cfa945-9049-4fe1-bde7-549e1c5d0e39-config\") pod \"route-controller-manager-6576b87f9c-8td6v\" (UID: \"f1cfa945-9049-4fe1-bde7-549e1c5d0e39\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8td6v" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.630795 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/1efa9307-bdd7-4ec9-ab59-32196c343838-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-79bp9\" (UID: \"1efa9307-bdd7-4ec9-ab59-32196c343838\") " pod="openshift-controller-manager/controller-manager-879f6c89f-79bp9" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.630842 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9b644a0b-f7ef-40f5-8855-0264a224470e-config\") pod \"console-operator-58897d9998-v4t65\" (UID: \"9b644a0b-f7ef-40f5-8855-0264a224470e\") " pod="openshift-console-operator/console-operator-58897d9998-v4t65" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.630870 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f1cfa945-9049-4fe1-bde7-549e1c5d0e39-client-ca\") pod \"route-controller-manager-6576b87f9c-8td6v\" (UID: \"f1cfa945-9049-4fe1-bde7-549e1c5d0e39\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8td6v" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.630896 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/54adcb76-b278-4991-9999-d6b5f8c8c1d6-audit-policies\") pod \"apiserver-7bbb656c7d-wlgn6\" (UID: \"54adcb76-b278-4991-9999-d6b5f8c8c1d6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlgn6" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.630919 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/54adcb76-b278-4991-9999-d6b5f8c8c1d6-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-wlgn6\" (UID: \"54adcb76-b278-4991-9999-d6b5f8c8c1d6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlgn6" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.630947 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jrpd9\" (UniqueName: \"kubernetes.io/projected/6f0058ec-2d51-4750-ba72-32b848e39402-kube-api-access-jrpd9\") pod \"apiserver-76f77b778f-pwzwp\" (UID: \"6f0058ec-2d51-4750-ba72-32b848e39402\") " pod="openshift-apiserver/apiserver-76f77b778f-pwzwp" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.630972 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/54adcb76-b278-4991-9999-d6b5f8c8c1d6-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-wlgn6\" (UID: \"54adcb76-b278-4991-9999-d6b5f8c8c1d6\") " 
pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlgn6" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.630997 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6f0058ec-2d51-4750-ba72-32b848e39402-serving-cert\") pod \"apiserver-76f77b778f-pwzwp\" (UID: \"6f0058ec-2d51-4750-ba72-32b848e39402\") " pod="openshift-apiserver/apiserver-76f77b778f-pwzwp" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.631017 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2bx4f\" (UniqueName: \"kubernetes.io/projected/1efa9307-bdd7-4ec9-ab59-32196c343838-kube-api-access-2bx4f\") pod \"controller-manager-879f6c89f-79bp9\" (UID: \"1efa9307-bdd7-4ec9-ab59-32196c343838\") " pod="openshift-controller-manager/controller-manager-879f6c89f-79bp9" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.631049 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-85vkv\" (UniqueName: \"kubernetes.io/projected/9b644a0b-f7ef-40f5-8855-0264a224470e-kube-api-access-85vkv\") pod \"console-operator-58897d9998-v4t65\" (UID: \"9b644a0b-f7ef-40f5-8855-0264a224470e\") " pod="openshift-console-operator/console-operator-58897d9998-v4t65" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.631078 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/6f0058ec-2d51-4750-ba72-32b848e39402-audit\") pod \"apiserver-76f77b778f-pwzwp\" (UID: \"6f0058ec-2d51-4750-ba72-32b848e39402\") " pod="openshift-apiserver/apiserver-76f77b778f-pwzwp" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.631104 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/54adcb76-b278-4991-9999-d6b5f8c8c1d6-serving-cert\") pod \"apiserver-7bbb656c7d-wlgn6\" (UID: \"54adcb76-b278-4991-9999-d6b5f8c8c1d6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlgn6" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.631128 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z4mg6\" (UniqueName: \"kubernetes.io/projected/524dcf6e-3c5c-47db-9941-06a21f9a8194-kube-api-access-z4mg6\") pod \"openshift-config-operator-7777fb866f-v5mpd\" (UID: \"524dcf6e-3c5c-47db-9941-06a21f9a8194\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-v5mpd" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.631155 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9b644a0b-f7ef-40f5-8855-0264a224470e-serving-cert\") pod \"console-operator-58897d9998-v4t65\" (UID: \"9b644a0b-f7ef-40f5-8855-0264a224470e\") " pod="openshift-console-operator/console-operator-58897d9998-v4t65" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.631180 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1efa9307-bdd7-4ec9-ab59-32196c343838-config\") pod \"controller-manager-879f6c89f-79bp9\" (UID: \"1efa9307-bdd7-4ec9-ab59-32196c343838\") " pod="openshift-controller-manager/controller-manager-879f6c89f-79bp9" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 
16:31:31.631208 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/524dcf6e-3c5c-47db-9941-06a21f9a8194-serving-cert\") pod \"openshift-config-operator-7777fb866f-v5mpd\" (UID: \"524dcf6e-3c5c-47db-9941-06a21f9a8194\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-v5mpd" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.631234 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1efa9307-bdd7-4ec9-ab59-32196c343838-client-ca\") pod \"controller-manager-879f6c89f-79bp9\" (UID: \"1efa9307-bdd7-4ec9-ab59-32196c343838\") " pod="openshift-controller-manager/controller-manager-879f6c89f-79bp9" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.631255 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/54adcb76-b278-4991-9999-d6b5f8c8c1d6-audit-dir\") pod \"apiserver-7bbb656c7d-wlgn6\" (UID: \"54adcb76-b278-4991-9999-d6b5f8c8c1d6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlgn6" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.631282 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/524dcf6e-3c5c-47db-9941-06a21f9a8194-available-featuregates\") pod \"openshift-config-operator-7777fb866f-v5mpd\" (UID: \"524dcf6e-3c5c-47db-9941-06a21f9a8194\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-v5mpd" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.631311 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6f0058ec-2d51-4750-ba72-32b848e39402-config\") pod \"apiserver-76f77b778f-pwzwp\" (UID: \"6f0058ec-2d51-4750-ba72-32b848e39402\") " pod="openshift-apiserver/apiserver-76f77b778f-pwzwp" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.631345 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1ecd290f-188e-4ff0-a52f-6286412a0b5a-config\") pod \"machine-api-operator-5694c8668f-qt6ps\" (UID: \"1ecd290f-188e-4ff0-a52f-6286412a0b5a\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-qt6ps" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.631368 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-st4pc\" (UniqueName: \"kubernetes.io/projected/1ecd290f-188e-4ff0-a52f-6286412a0b5a-kube-api-access-st4pc\") pod \"machine-api-operator-5694c8668f-qt6ps\" (UID: \"1ecd290f-188e-4ff0-a52f-6286412a0b5a\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-qt6ps" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.631395 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f1cfa945-9049-4fe1-bde7-549e1c5d0e39-serving-cert\") pod \"route-controller-manager-6576b87f9c-8td6v\" (UID: \"f1cfa945-9049-4fe1-bde7-549e1c5d0e39\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8td6v" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.643935 4769 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-multus/multus-admission-controller-857f4d67dd-2jt6j"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.644590 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-2jt6j" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.645624 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1efa9307-bdd7-4ec9-ab59-32196c343838-serving-cert\") pod \"controller-manager-879f6c89f-79bp9\" (UID: \"1efa9307-bdd7-4ec9-ab59-32196c343838\") " pod="openshift-controller-manager/controller-manager-879f6c89f-79bp9" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.645676 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/1ecd290f-188e-4ff0-a52f-6286412a0b5a-images\") pod \"machine-api-operator-5694c8668f-qt6ps\" (UID: \"1ecd290f-188e-4ff0-a52f-6286412a0b5a\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-qt6ps" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.645698 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/6f0058ec-2d51-4750-ba72-32b848e39402-encryption-config\") pod \"apiserver-76f77b778f-pwzwp\" (UID: \"6f0058ec-2d51-4750-ba72-32b848e39402\") " pod="openshift-apiserver/apiserver-76f77b778f-pwzwp" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.645718 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mfvw2\" (UniqueName: \"kubernetes.io/projected/f1cfa945-9049-4fe1-bde7-549e1c5d0e39-kube-api-access-mfvw2\") pod \"route-controller-manager-6576b87f9c-8td6v\" (UID: \"f1cfa945-9049-4fe1-bde7-549e1c5d0e39\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8td6v" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.645752 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/6f0058ec-2d51-4750-ba72-32b848e39402-etcd-client\") pod \"apiserver-76f77b778f-pwzwp\" (UID: \"6f0058ec-2d51-4750-ba72-32b848e39402\") " pod="openshift-apiserver/apiserver-76f77b778f-pwzwp" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.645766 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/6f0058ec-2d51-4750-ba72-32b848e39402-etcd-serving-ca\") pod \"apiserver-76f77b778f-pwzwp\" (UID: \"6f0058ec-2d51-4750-ba72-32b848e39402\") " pod="openshift-apiserver/apiserver-76f77b778f-pwzwp" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.645795 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/54adcb76-b278-4991-9999-d6b5f8c8c1d6-encryption-config\") pod \"apiserver-7bbb656c7d-wlgn6\" (UID: \"54adcb76-b278-4991-9999-d6b5f8c8c1d6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlgn6" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.645812 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/6f0058ec-2d51-4750-ba72-32b848e39402-node-pullsecrets\") pod 
\"apiserver-76f77b778f-pwzwp\" (UID: \"6f0058ec-2d51-4750-ba72-32b848e39402\") " pod="openshift-apiserver/apiserver-76f77b778f-pwzwp" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.645831 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pc8xt\" (UniqueName: \"kubernetes.io/projected/54adcb76-b278-4991-9999-d6b5f8c8c1d6-kube-api-access-pc8xt\") pod \"apiserver-7bbb656c7d-wlgn6\" (UID: \"54adcb76-b278-4991-9999-d6b5f8c8c1d6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlgn6" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.645845 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/6f0058ec-2d51-4750-ba72-32b848e39402-image-import-ca\") pod \"apiserver-76f77b778f-pwzwp\" (UID: \"6f0058ec-2d51-4750-ba72-32b848e39402\") " pod="openshift-apiserver/apiserver-76f77b778f-pwzwp" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.645862 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/54adcb76-b278-4991-9999-d6b5f8c8c1d6-etcd-client\") pod \"apiserver-7bbb656c7d-wlgn6\" (UID: \"54adcb76-b278-4991-9999-d6b5f8c8c1d6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlgn6" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.645886 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/1ecd290f-188e-4ff0-a52f-6286412a0b5a-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-qt6ps\" (UID: \"1ecd290f-188e-4ff0-a52f-6286412a0b5a\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-qt6ps" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.645903 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6f0058ec-2d51-4750-ba72-32b848e39402-audit-dir\") pod \"apiserver-76f77b778f-pwzwp\" (UID: \"6f0058ec-2d51-4750-ba72-32b848e39402\") " pod="openshift-apiserver/apiserver-76f77b778f-pwzwp" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.645919 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6f0058ec-2d51-4750-ba72-32b848e39402-trusted-ca-bundle\") pod \"apiserver-76f77b778f-pwzwp\" (UID: \"6f0058ec-2d51-4750-ba72-32b848e39402\") " pod="openshift-apiserver/apiserver-76f77b778f-pwzwp" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.645934 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9b644a0b-f7ef-40f5-8855-0264a224470e-trusted-ca\") pod \"console-operator-58897d9998-v4t65\" (UID: \"9b644a0b-f7ef-40f5-8855-0264a224470e\") " pod="openshift-console-operator/console-operator-58897d9998-v4t65" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.651877 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.651966 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.652325 4769 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.652334 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.652433 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.652548 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.652611 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.652732 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.652815 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.652945 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.653023 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.653157 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.653220 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.653304 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.653336 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.653443 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.653532 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.653764 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.654375 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.654572 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.654701 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.654760 4769 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.654829 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.654940 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.654951 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.655113 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.655829 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.659059 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.659208 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.659374 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.659466 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.659586 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.659798 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.659979 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.660138 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.660195 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.660147 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.660922 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.669809 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-w6kkt"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.670590 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-w6kkt" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.670797 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.673345 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.678148 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-6q4jz"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.678970 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rdqmx"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.679157 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.679379 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rdqmx" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.679642 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.682071 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6q4jz" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.684046 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.684264 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.684462 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.685113 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.687809 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-2vgpw"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.688514 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-4lpmw"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.688978 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-4lpmw" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.689275 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-2vgpw" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.692606 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-g2568"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.693177 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5zm7b"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.694395 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-g2568" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.695228 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-nbvrj"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.695404 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.695751 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nbvrj" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.695941 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5zm7b" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.696365 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.696401 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.696515 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.696665 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.696772 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.696881 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.696977 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.697102 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.699474 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.700475 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Jan 31 16:31:31 crc 
kubenswrapper[4769]: I0131 16:31:31.703543 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4nngf"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.704247 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4nngf" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.704562 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5xmz7"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.704857 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5xmz7" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.722064 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.722881 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9jflr"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.726542 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-nxr6j"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.726967 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9jflr" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.727380 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-p7926"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.727689 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.728021 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-nxr6j" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.728459 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-p7926" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.750429 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.753269 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-wx75k"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.753751 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-gwhw8"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.754163 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-57kt2"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.754631 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-57kt2" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.755095 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-wx75k" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.755186 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.755234 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-gwhw8" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.755896 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/6f0058ec-2d51-4750-ba72-32b848e39402-node-pullsecrets\") pod \"apiserver-76f77b778f-pwzwp\" (UID: \"6f0058ec-2d51-4750-ba72-32b848e39402\") " pod="openshift-apiserver/apiserver-76f77b778f-pwzwp" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.755923 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2e845173-60d6-4bbb-a479-e752d55a4d7d-trusted-ca-bundle\") pod \"console-f9d7485db-ds894\" (UID: \"2e845173-60d6-4bbb-a479-e752d55a4d7d\") " pod="openshift-console/console-f9d7485db-ds894" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.755944 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pc8xt\" (UniqueName: \"kubernetes.io/projected/54adcb76-b278-4991-9999-d6b5f8c8c1d6-kube-api-access-pc8xt\") pod \"apiserver-7bbb656c7d-wlgn6\" (UID: \"54adcb76-b278-4991-9999-d6b5f8c8c1d6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlgn6" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.755960 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/6f0058ec-2d51-4750-ba72-32b848e39402-image-import-ca\") pod \"apiserver-76f77b778f-pwzwp\" (UID: \"6f0058ec-2d51-4750-ba72-32b848e39402\") " pod="openshift-apiserver/apiserver-76f77b778f-pwzwp" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.755976 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/54adcb76-b278-4991-9999-d6b5f8c8c1d6-etcd-client\") pod \"apiserver-7bbb656c7d-wlgn6\" (UID: \"54adcb76-b278-4991-9999-d6b5f8c8c1d6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlgn6" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.756048 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/1ecd290f-188e-4ff0-a52f-6286412a0b5a-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-qt6ps\" (UID: \"1ecd290f-188e-4ff0-a52f-6286412a0b5a\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-qt6ps" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.756064 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6f0058ec-2d51-4750-ba72-32b848e39402-audit-dir\") pod \"apiserver-76f77b778f-pwzwp\" (UID: \"6f0058ec-2d51-4750-ba72-32b848e39402\") " pod="openshift-apiserver/apiserver-76f77b778f-pwzwp" Jan 31 
16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.756142 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6f0058ec-2d51-4750-ba72-32b848e39402-trusted-ca-bundle\") pod \"apiserver-76f77b778f-pwzwp\" (UID: \"6f0058ec-2d51-4750-ba72-32b848e39402\") " pod="openshift-apiserver/apiserver-76f77b778f-pwzwp" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.756160 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9b644a0b-f7ef-40f5-8855-0264a224470e-trusted-ca\") pod \"console-operator-58897d9998-v4t65\" (UID: \"9b644a0b-f7ef-40f5-8855-0264a224470e\") " pod="openshift-console-operator/console-operator-58897d9998-v4t65" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.756179 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bd9cp\" (UniqueName: \"kubernetes.io/projected/2e845173-60d6-4bbb-a479-e752d55a4d7d-kube-api-access-bd9cp\") pod \"console-f9d7485db-ds894\" (UID: \"2e845173-60d6-4bbb-a479-e752d55a4d7d\") " pod="openshift-console/console-f9d7485db-ds894" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.756197 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/82fed0ed-2731-404f-a5f7-1552993ecd71-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-2jt6j\" (UID: \"82fed0ed-2731-404f-a5f7-1552993ecd71\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-2jt6j" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.756212 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f1cfa945-9049-4fe1-bde7-549e1c5d0e39-config\") pod \"route-controller-manager-6576b87f9c-8td6v\" (UID: \"f1cfa945-9049-4fe1-bde7-549e1c5d0e39\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8td6v" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.756227 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6bdbj\" (UniqueName: \"kubernetes.io/projected/82fed0ed-2731-404f-a5f7-1552993ecd71-kube-api-access-6bdbj\") pod \"multus-admission-controller-857f4d67dd-2jt6j\" (UID: \"82fed0ed-2731-404f-a5f7-1552993ecd71\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-2jt6j" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.756250 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/1efa9307-bdd7-4ec9-ab59-32196c343838-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-79bp9\" (UID: \"1efa9307-bdd7-4ec9-ab59-32196c343838\") " pod="openshift-controller-manager/controller-manager-879f6c89f-79bp9" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.756271 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9b644a0b-f7ef-40f5-8855-0264a224470e-config\") pod \"console-operator-58897d9998-v4t65\" (UID: \"9b644a0b-f7ef-40f5-8855-0264a224470e\") " pod="openshift-console-operator/console-operator-58897d9998-v4t65" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.756287 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/f1cfa945-9049-4fe1-bde7-549e1c5d0e39-client-ca\") pod \"route-controller-manager-6576b87f9c-8td6v\" (UID: \"f1cfa945-9049-4fe1-bde7-549e1c5d0e39\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8td6v" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.756304 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/54adcb76-b278-4991-9999-d6b5f8c8c1d6-audit-policies\") pod \"apiserver-7bbb656c7d-wlgn6\" (UID: \"54adcb76-b278-4991-9999-d6b5f8c8c1d6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlgn6" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.756294 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6f0058ec-2d51-4750-ba72-32b848e39402-audit-dir\") pod \"apiserver-76f77b778f-pwzwp\" (UID: \"6f0058ec-2d51-4750-ba72-32b848e39402\") " pod="openshift-apiserver/apiserver-76f77b778f-pwzwp" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.756369 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/6f0058ec-2d51-4750-ba72-32b848e39402-node-pullsecrets\") pod \"apiserver-76f77b778f-pwzwp\" (UID: \"6f0058ec-2d51-4750-ba72-32b848e39402\") " pod="openshift-apiserver/apiserver-76f77b778f-pwzwp" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.756538 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-c5wrc"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.757014 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-c5wrc" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.757306 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/6f0058ec-2d51-4750-ba72-32b848e39402-image-import-ca\") pod \"apiserver-76f77b778f-pwzwp\" (UID: \"6f0058ec-2d51-4750-ba72-32b848e39402\") " pod="openshift-apiserver/apiserver-76f77b778f-pwzwp" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.756321 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/54adcb76-b278-4991-9999-d6b5f8c8c1d6-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-wlgn6\" (UID: \"54adcb76-b278-4991-9999-d6b5f8c8c1d6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlgn6" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.758029 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jrpd9\" (UniqueName: \"kubernetes.io/projected/6f0058ec-2d51-4750-ba72-32b848e39402-kube-api-access-jrpd9\") pod \"apiserver-76f77b778f-pwzwp\" (UID: \"6f0058ec-2d51-4750-ba72-32b848e39402\") " pod="openshift-apiserver/apiserver-76f77b778f-pwzwp" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.758049 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/54adcb76-b278-4991-9999-d6b5f8c8c1d6-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-wlgn6\" (UID: \"54adcb76-b278-4991-9999-d6b5f8c8c1d6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlgn6" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.758064 4769 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6f0058ec-2d51-4750-ba72-32b848e39402-serving-cert\") pod \"apiserver-76f77b778f-pwzwp\" (UID: \"6f0058ec-2d51-4750-ba72-32b848e39402\") " pod="openshift-apiserver/apiserver-76f77b778f-pwzwp" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.758079 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2bx4f\" (UniqueName: \"kubernetes.io/projected/1efa9307-bdd7-4ec9-ab59-32196c343838-kube-api-access-2bx4f\") pod \"controller-manager-879f6c89f-79bp9\" (UID: \"1efa9307-bdd7-4ec9-ab59-32196c343838\") " pod="openshift-controller-manager/controller-manager-879f6c89f-79bp9" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.758120 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-85vkv\" (UniqueName: \"kubernetes.io/projected/9b644a0b-f7ef-40f5-8855-0264a224470e-kube-api-access-85vkv\") pod \"console-operator-58897d9998-v4t65\" (UID: \"9b644a0b-f7ef-40f5-8855-0264a224470e\") " pod="openshift-console-operator/console-operator-58897d9998-v4t65" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.758139 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/2e845173-60d6-4bbb-a479-e752d55a4d7d-console-serving-cert\") pod \"console-f9d7485db-ds894\" (UID: \"2e845173-60d6-4bbb-a479-e752d55a4d7d\") " pod="openshift-console/console-f9d7485db-ds894" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.758154 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/6f0058ec-2d51-4750-ba72-32b848e39402-audit\") pod \"apiserver-76f77b778f-pwzwp\" (UID: \"6f0058ec-2d51-4750-ba72-32b848e39402\") " pod="openshift-apiserver/apiserver-76f77b778f-pwzwp" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.758170 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-btrln\" (UniqueName: \"kubernetes.io/projected/0267e024-1c4b-49cd-b1b3-f4afe91bbbfa-kube-api-access-btrln\") pod \"machine-approver-56656f9798-cvn6z\" (UID: \"0267e024-1c4b-49cd-b1b3-f4afe91bbbfa\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cvn6z" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.758191 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/54adcb76-b278-4991-9999-d6b5f8c8c1d6-serving-cert\") pod \"apiserver-7bbb656c7d-wlgn6\" (UID: \"54adcb76-b278-4991-9999-d6b5f8c8c1d6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlgn6" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.758207 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z4mg6\" (UniqueName: \"kubernetes.io/projected/524dcf6e-3c5c-47db-9941-06a21f9a8194-kube-api-access-z4mg6\") pod \"openshift-config-operator-7777fb866f-v5mpd\" (UID: \"524dcf6e-3c5c-47db-9941-06a21f9a8194\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-v5mpd" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.758222 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9b644a0b-f7ef-40f5-8855-0264a224470e-serving-cert\") pod \"console-operator-58897d9998-v4t65\" (UID: 
\"9b644a0b-f7ef-40f5-8855-0264a224470e\") " pod="openshift-console-operator/console-operator-58897d9998-v4t65" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.758238 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1efa9307-bdd7-4ec9-ab59-32196c343838-config\") pod \"controller-manager-879f6c89f-79bp9\" (UID: \"1efa9307-bdd7-4ec9-ab59-32196c343838\") " pod="openshift-controller-manager/controller-manager-879f6c89f-79bp9" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.758253 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2e845173-60d6-4bbb-a479-e752d55a4d7d-service-ca\") pod \"console-f9d7485db-ds894\" (UID: \"2e845173-60d6-4bbb-a479-e752d55a4d7d\") " pod="openshift-console/console-f9d7485db-ds894" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.758270 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/524dcf6e-3c5c-47db-9941-06a21f9a8194-serving-cert\") pod \"openshift-config-operator-7777fb866f-v5mpd\" (UID: \"524dcf6e-3c5c-47db-9941-06a21f9a8194\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-v5mpd" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.758286 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/2e845173-60d6-4bbb-a479-e752d55a4d7d-console-oauth-config\") pod \"console-f9d7485db-ds894\" (UID: \"2e845173-60d6-4bbb-a479-e752d55a4d7d\") " pod="openshift-console/console-f9d7485db-ds894" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.758303 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1efa9307-bdd7-4ec9-ab59-32196c343838-client-ca\") pod \"controller-manager-879f6c89f-79bp9\" (UID: \"1efa9307-bdd7-4ec9-ab59-32196c343838\") " pod="openshift-controller-manager/controller-manager-879f6c89f-79bp9" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.758319 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/54adcb76-b278-4991-9999-d6b5f8c8c1d6-audit-dir\") pod \"apiserver-7bbb656c7d-wlgn6\" (UID: \"54adcb76-b278-4991-9999-d6b5f8c8c1d6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlgn6" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.758334 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/524dcf6e-3c5c-47db-9941-06a21f9a8194-available-featuregates\") pod \"openshift-config-operator-7777fb866f-v5mpd\" (UID: \"524dcf6e-3c5c-47db-9941-06a21f9a8194\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-v5mpd" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.758349 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6f0058ec-2d51-4750-ba72-32b848e39402-config\") pod \"apiserver-76f77b778f-pwzwp\" (UID: \"6f0058ec-2d51-4750-ba72-32b848e39402\") " pod="openshift-apiserver/apiserver-76f77b778f-pwzwp" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.758364 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"config\" (UniqueName: \"kubernetes.io/configmap/0267e024-1c4b-49cd-b1b3-f4afe91bbbfa-config\") pod \"machine-approver-56656f9798-cvn6z\" (UID: \"0267e024-1c4b-49cd-b1b3-f4afe91bbbfa\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cvn6z" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.758383 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1ecd290f-188e-4ff0-a52f-6286412a0b5a-config\") pod \"machine-api-operator-5694c8668f-qt6ps\" (UID: \"1ecd290f-188e-4ff0-a52f-6286412a0b5a\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-qt6ps" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.758399 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-st4pc\" (UniqueName: \"kubernetes.io/projected/1ecd290f-188e-4ff0-a52f-6286412a0b5a-kube-api-access-st4pc\") pod \"machine-api-operator-5694c8668f-qt6ps\" (UID: \"1ecd290f-188e-4ff0-a52f-6286412a0b5a\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-qt6ps" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.758415 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/0267e024-1c4b-49cd-b1b3-f4afe91bbbfa-machine-approver-tls\") pod \"machine-approver-56656f9798-cvn6z\" (UID: \"0267e024-1c4b-49cd-b1b3-f4afe91bbbfa\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cvn6z" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.758433 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0267e024-1c4b-49cd-b1b3-f4afe91bbbfa-auth-proxy-config\") pod \"machine-approver-56656f9798-cvn6z\" (UID: \"0267e024-1c4b-49cd-b1b3-f4afe91bbbfa\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cvn6z" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.758452 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f1cfa945-9049-4fe1-bde7-549e1c5d0e39-serving-cert\") pod \"route-controller-manager-6576b87f9c-8td6v\" (UID: \"f1cfa945-9049-4fe1-bde7-549e1c5d0e39\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8td6v" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.758467 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1efa9307-bdd7-4ec9-ab59-32196c343838-serving-cert\") pod \"controller-manager-879f6c89f-79bp9\" (UID: \"1efa9307-bdd7-4ec9-ab59-32196c343838\") " pod="openshift-controller-manager/controller-manager-879f6c89f-79bp9" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.758484 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/2e845173-60d6-4bbb-a479-e752d55a4d7d-console-config\") pod \"console-f9d7485db-ds894\" (UID: \"2e845173-60d6-4bbb-a479-e752d55a4d7d\") " pod="openshift-console/console-f9d7485db-ds894" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.758518 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/1ecd290f-188e-4ff0-a52f-6286412a0b5a-images\") pod \"machine-api-operator-5694c8668f-qt6ps\" (UID: 
\"1ecd290f-188e-4ff0-a52f-6286412a0b5a\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-qt6ps" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.758534 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/6f0058ec-2d51-4750-ba72-32b848e39402-encryption-config\") pod \"apiserver-76f77b778f-pwzwp\" (UID: \"6f0058ec-2d51-4750-ba72-32b848e39402\") " pod="openshift-apiserver/apiserver-76f77b778f-pwzwp" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.758552 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mfvw2\" (UniqueName: \"kubernetes.io/projected/f1cfa945-9049-4fe1-bde7-549e1c5d0e39-kube-api-access-mfvw2\") pod \"route-controller-manager-6576b87f9c-8td6v\" (UID: \"f1cfa945-9049-4fe1-bde7-549e1c5d0e39\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8td6v" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.758577 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/6f0058ec-2d51-4750-ba72-32b848e39402-etcd-client\") pod \"apiserver-76f77b778f-pwzwp\" (UID: \"6f0058ec-2d51-4750-ba72-32b848e39402\") " pod="openshift-apiserver/apiserver-76f77b778f-pwzwp" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.758591 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/6f0058ec-2d51-4750-ba72-32b848e39402-etcd-serving-ca\") pod \"apiserver-76f77b778f-pwzwp\" (UID: \"6f0058ec-2d51-4750-ba72-32b848e39402\") " pod="openshift-apiserver/apiserver-76f77b778f-pwzwp" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.758607 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/2e845173-60d6-4bbb-a479-e752d55a4d7d-oauth-serving-cert\") pod \"console-f9d7485db-ds894\" (UID: \"2e845173-60d6-4bbb-a479-e752d55a4d7d\") " pod="openshift-console/console-f9d7485db-ds894" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.758632 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/54adcb76-b278-4991-9999-d6b5f8c8c1d6-encryption-config\") pod \"apiserver-7bbb656c7d-wlgn6\" (UID: \"54adcb76-b278-4991-9999-d6b5f8c8c1d6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlgn6" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.758718 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f1cfa945-9049-4fe1-bde7-549e1c5d0e39-config\") pod \"route-controller-manager-6576b87f9c-8td6v\" (UID: \"f1cfa945-9049-4fe1-bde7-549e1c5d0e39\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8td6v" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.759119 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9b644a0b-f7ef-40f5-8855-0264a224470e-trusted-ca\") pod \"console-operator-58897d9998-v4t65\" (UID: \"9b644a0b-f7ef-40f5-8855-0264a224470e\") " pod="openshift-console-operator/console-operator-58897d9998-v4t65" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.759732 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/6f0058ec-2d51-4750-ba72-32b848e39402-trusted-ca-bundle\") pod \"apiserver-76f77b778f-pwzwp\" (UID: \"6f0058ec-2d51-4750-ba72-32b848e39402\") " pod="openshift-apiserver/apiserver-76f77b778f-pwzwp" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.759774 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/54adcb76-b278-4991-9999-d6b5f8c8c1d6-audit-dir\") pod \"apiserver-7bbb656c7d-wlgn6\" (UID: \"54adcb76-b278-4991-9999-d6b5f8c8c1d6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlgn6" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.760035 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/524dcf6e-3c5c-47db-9941-06a21f9a8194-available-featuregates\") pod \"openshift-config-operator-7777fb866f-v5mpd\" (UID: \"524dcf6e-3c5c-47db-9941-06a21f9a8194\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-v5mpd" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.760373 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1efa9307-bdd7-4ec9-ab59-32196c343838-client-ca\") pod \"controller-manager-879f6c89f-79bp9\" (UID: \"1efa9307-bdd7-4ec9-ab59-32196c343838\") " pod="openshift-controller-manager/controller-manager-879f6c89f-79bp9" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.760578 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/54adcb76-b278-4991-9999-d6b5f8c8c1d6-audit-policies\") pod \"apiserver-7bbb656c7d-wlgn6\" (UID: \"54adcb76-b278-4991-9999-d6b5f8c8c1d6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlgn6" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.761548 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/1efa9307-bdd7-4ec9-ab59-32196c343838-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-79bp9\" (UID: \"1efa9307-bdd7-4ec9-ab59-32196c343838\") " pod="openshift-controller-manager/controller-manager-879f6c89f-79bp9" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.761892 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/6f0058ec-2d51-4750-ba72-32b848e39402-etcd-serving-ca\") pod \"apiserver-76f77b778f-pwzwp\" (UID: \"6f0058ec-2d51-4750-ba72-32b848e39402\") " pod="openshift-apiserver/apiserver-76f77b778f-pwzwp" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.761962 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6f0058ec-2d51-4750-ba72-32b848e39402-config\") pod \"apiserver-76f77b778f-pwzwp\" (UID: \"6f0058ec-2d51-4750-ba72-32b848e39402\") " pod="openshift-apiserver/apiserver-76f77b778f-pwzwp" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.762172 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.762580 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/54adcb76-b278-4991-9999-d6b5f8c8c1d6-encryption-config\") pod \"apiserver-7bbb656c7d-wlgn6\" (UID: \"54adcb76-b278-4991-9999-d6b5f8c8c1d6\") " 
pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlgn6" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.762850 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/54adcb76-b278-4991-9999-d6b5f8c8c1d6-serving-cert\") pod \"apiserver-7bbb656c7d-wlgn6\" (UID: \"54adcb76-b278-4991-9999-d6b5f8c8c1d6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlgn6" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.762951 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/54adcb76-b278-4991-9999-d6b5f8c8c1d6-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-wlgn6\" (UID: \"54adcb76-b278-4991-9999-d6b5f8c8c1d6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlgn6" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.763866 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/6f0058ec-2d51-4750-ba72-32b848e39402-audit\") pod \"apiserver-76f77b778f-pwzwp\" (UID: \"6f0058ec-2d51-4750-ba72-32b848e39402\") " pod="openshift-apiserver/apiserver-76f77b778f-pwzwp" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.764009 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/54adcb76-b278-4991-9999-d6b5f8c8c1d6-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-wlgn6\" (UID: \"54adcb76-b278-4991-9999-d6b5f8c8c1d6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlgn6" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.764129 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1ecd290f-188e-4ff0-a52f-6286412a0b5a-config\") pod \"machine-api-operator-5694c8668f-qt6ps\" (UID: \"1ecd290f-188e-4ff0-a52f-6286412a0b5a\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-qt6ps" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.764428 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9b644a0b-f7ef-40f5-8855-0264a224470e-config\") pod \"console-operator-58897d9998-v4t65\" (UID: \"9b644a0b-f7ef-40f5-8855-0264a224470e\") " pod="openshift-console-operator/console-operator-58897d9998-v4t65" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.764544 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/54adcb76-b278-4991-9999-d6b5f8c8c1d6-etcd-client\") pod \"apiserver-7bbb656c7d-wlgn6\" (UID: \"54adcb76-b278-4991-9999-d6b5f8c8c1d6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlgn6" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.764672 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1efa9307-bdd7-4ec9-ab59-32196c343838-serving-cert\") pod \"controller-manager-879f6c89f-79bp9\" (UID: \"1efa9307-bdd7-4ec9-ab59-32196c343838\") " pod="openshift-controller-manager/controller-manager-879f6c89f-79bp9" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.764770 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f1cfa945-9049-4fe1-bde7-549e1c5d0e39-client-ca\") pod \"route-controller-manager-6576b87f9c-8td6v\" (UID: \"f1cfa945-9049-4fe1-bde7-549e1c5d0e39\") " 
pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8td6v" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.765640 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1efa9307-bdd7-4ec9-ab59-32196c343838-config\") pod \"controller-manager-879f6c89f-79bp9\" (UID: \"1efa9307-bdd7-4ec9-ab59-32196c343838\") " pod="openshift-controller-manager/controller-manager-879f6c89f-79bp9" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.765676 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/524dcf6e-3c5c-47db-9941-06a21f9a8194-serving-cert\") pod \"openshift-config-operator-7777fb866f-v5mpd\" (UID: \"524dcf6e-3c5c-47db-9941-06a21f9a8194\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-v5mpd" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.766131 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/1ecd290f-188e-4ff0-a52f-6286412a0b5a-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-qt6ps\" (UID: \"1ecd290f-188e-4ff0-a52f-6286412a0b5a\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-qt6ps" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.768670 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f1cfa945-9049-4fe1-bde7-549e1c5d0e39-serving-cert\") pod \"route-controller-manager-6576b87f9c-8td6v\" (UID: \"f1cfa945-9049-4fe1-bde7-549e1c5d0e39\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8td6v" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.768932 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9b644a0b-f7ef-40f5-8855-0264a224470e-serving-cert\") pod \"console-operator-58897d9998-v4t65\" (UID: \"9b644a0b-f7ef-40f5-8855-0264a224470e\") " pod="openshift-console-operator/console-operator-58897d9998-v4t65" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.769673 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/6f0058ec-2d51-4750-ba72-32b848e39402-encryption-config\") pod \"apiserver-76f77b778f-pwzwp\" (UID: \"6f0058ec-2d51-4750-ba72-32b848e39402\") " pod="openshift-apiserver/apiserver-76f77b778f-pwzwp" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.774547 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.780084 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/1ecd290f-188e-4ff0-a52f-6286412a0b5a-images\") pod \"machine-api-operator-5694c8668f-qt6ps\" (UID: \"1ecd290f-188e-4ff0-a52f-6286412a0b5a\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-qt6ps" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.781513 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-gt8bb"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.782072 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-gt8bb" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.782386 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/6f0058ec-2d51-4750-ba72-32b848e39402-etcd-client\") pod \"apiserver-76f77b778f-pwzwp\" (UID: \"6f0058ec-2d51-4750-ba72-32b848e39402\") " pod="openshift-apiserver/apiserver-76f77b778f-pwzwp" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.782611 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6f0058ec-2d51-4750-ba72-32b848e39402-serving-cert\") pod \"apiserver-76f77b778f-pwzwp\" (UID: \"6f0058ec-2d51-4750-ba72-32b848e39402\") " pod="openshift-apiserver/apiserver-76f77b778f-pwzwp" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.783338 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6gsx5"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.784033 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6gsx5" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.792608 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29497950-gf6hx"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.793340 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29497950-gf6hx" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.794758 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sn9v6"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.795212 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sn9v6" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.795745 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.796675 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-59jw4"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.797445 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-b4pjt"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.797555 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-59jw4" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.798903 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-b4pjt" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.800766 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-x87ps"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.801379 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-x87ps" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.802591 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-wlgn6"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.803537 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-2vgpw"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.804583 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-vdcnf"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.805976 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-6q4jz"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.806976 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-h8nkx"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.807950 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-8td6v"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.808917 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-g2568"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.809932 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4nngf"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.811243 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-v5mpd"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.812076 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9jflr"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.813106 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-p7926"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.814260 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6gsx5"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.814688 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.815621 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5zm7b"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.817202 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29497950-gf6hx"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.817694 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-sppfg"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.818790 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-ds894"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.819755 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-4pc7f"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.820788 4769 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-4pc7f" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.820936 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-9vxrh"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.821467 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-9vxrh" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.822120 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rdqmx"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.823821 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-nbvrj"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.826126 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-c5wrc"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.827550 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-2jt6j"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.829303 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-m58s7"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.831221 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-nxr6j"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.832910 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hhtgl"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.834714 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.837059 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-59jw4"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.839384 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sn9v6"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.842442 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-gt8bb"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.845694 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-wx75k"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.846941 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-w6kkt"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.848064 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5xmz7"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.849143 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-gwhw8"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.850330 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-57kt2"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.851568 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-9vxrh"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.852763 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-x87ps"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.854107 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-4pc7f"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.855026 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.856865 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-b4pjt"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.858404 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-vd8mb"] Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.859288 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-vd8mb" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.859369 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2e845173-60d6-4bbb-a479-e752d55a4d7d-trusted-ca-bundle\") pod \"console-f9d7485db-ds894\" (UID: \"2e845173-60d6-4bbb-a479-e752d55a4d7d\") " pod="openshift-console/console-f9d7485db-ds894" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.859419 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bd9cp\" (UniqueName: \"kubernetes.io/projected/2e845173-60d6-4bbb-a479-e752d55a4d7d-kube-api-access-bd9cp\") pod \"console-f9d7485db-ds894\" (UID: \"2e845173-60d6-4bbb-a479-e752d55a4d7d\") " pod="openshift-console/console-f9d7485db-ds894" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.859443 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/82fed0ed-2731-404f-a5f7-1552993ecd71-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-2jt6j\" (UID: \"82fed0ed-2731-404f-a5f7-1552993ecd71\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-2jt6j" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.859463 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6bdbj\" (UniqueName: \"kubernetes.io/projected/82fed0ed-2731-404f-a5f7-1552993ecd71-kube-api-access-6bdbj\") pod \"multus-admission-controller-857f4d67dd-2jt6j\" (UID: \"82fed0ed-2731-404f-a5f7-1552993ecd71\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-2jt6j" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.859532 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/2e845173-60d6-4bbb-a479-e752d55a4d7d-console-serving-cert\") pod \"console-f9d7485db-ds894\" (UID: \"2e845173-60d6-4bbb-a479-e752d55a4d7d\") " pod="openshift-console/console-f9d7485db-ds894" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.859554 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-btrln\" (UniqueName: 
\"kubernetes.io/projected/0267e024-1c4b-49cd-b1b3-f4afe91bbbfa-kube-api-access-btrln\") pod \"machine-approver-56656f9798-cvn6z\" (UID: \"0267e024-1c4b-49cd-b1b3-f4afe91bbbfa\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cvn6z" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.859579 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2e845173-60d6-4bbb-a479-e752d55a4d7d-service-ca\") pod \"console-f9d7485db-ds894\" (UID: \"2e845173-60d6-4bbb-a479-e752d55a4d7d\") " pod="openshift-console/console-f9d7485db-ds894" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.859597 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/2e845173-60d6-4bbb-a479-e752d55a4d7d-console-oauth-config\") pod \"console-f9d7485db-ds894\" (UID: \"2e845173-60d6-4bbb-a479-e752d55a4d7d\") " pod="openshift-console/console-f9d7485db-ds894" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.859615 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0267e024-1c4b-49cd-b1b3-f4afe91bbbfa-config\") pod \"machine-approver-56656f9798-cvn6z\" (UID: \"0267e024-1c4b-49cd-b1b3-f4afe91bbbfa\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cvn6z" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.859639 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/0267e024-1c4b-49cd-b1b3-f4afe91bbbfa-machine-approver-tls\") pod \"machine-approver-56656f9798-cvn6z\" (UID: \"0267e024-1c4b-49cd-b1b3-f4afe91bbbfa\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cvn6z" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.859657 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0267e024-1c4b-49cd-b1b3-f4afe91bbbfa-auth-proxy-config\") pod \"machine-approver-56656f9798-cvn6z\" (UID: \"0267e024-1c4b-49cd-b1b3-f4afe91bbbfa\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cvn6z" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.859677 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/2e845173-60d6-4bbb-a479-e752d55a4d7d-console-config\") pod \"console-f9d7485db-ds894\" (UID: \"2e845173-60d6-4bbb-a479-e752d55a4d7d\") " pod="openshift-console/console-f9d7485db-ds894" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.859710 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/2e845173-60d6-4bbb-a479-e752d55a4d7d-oauth-serving-cert\") pod \"console-f9d7485db-ds894\" (UID: \"2e845173-60d6-4bbb-a479-e752d55a4d7d\") " pod="openshift-console/console-f9d7485db-ds894" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.860227 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0267e024-1c4b-49cd-b1b3-f4afe91bbbfa-config\") pod \"machine-approver-56656f9798-cvn6z\" (UID: \"0267e024-1c4b-49cd-b1b3-f4afe91bbbfa\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cvn6z" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.860469 4769 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0267e024-1c4b-49cd-b1b3-f4afe91bbbfa-auth-proxy-config\") pod \"machine-approver-56656f9798-cvn6z\" (UID: \"0267e024-1c4b-49cd-b1b3-f4afe91bbbfa\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cvn6z" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.860540 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2e845173-60d6-4bbb-a479-e752d55a4d7d-service-ca\") pod \"console-f9d7485db-ds894\" (UID: \"2e845173-60d6-4bbb-a479-e752d55a4d7d\") " pod="openshift-console/console-f9d7485db-ds894" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.860732 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/2e845173-60d6-4bbb-a479-e752d55a4d7d-oauth-serving-cert\") pod \"console-f9d7485db-ds894\" (UID: \"2e845173-60d6-4bbb-a479-e752d55a4d7d\") " pod="openshift-console/console-f9d7485db-ds894" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.860858 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/2e845173-60d6-4bbb-a479-e752d55a4d7d-console-config\") pod \"console-f9d7485db-ds894\" (UID: \"2e845173-60d6-4bbb-a479-e752d55a4d7d\") " pod="openshift-console/console-f9d7485db-ds894" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.861146 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2e845173-60d6-4bbb-a479-e752d55a4d7d-trusted-ca-bundle\") pod \"console-f9d7485db-ds894\" (UID: \"2e845173-60d6-4bbb-a479-e752d55a4d7d\") " pod="openshift-console/console-f9d7485db-ds894" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.862693 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/82fed0ed-2731-404f-a5f7-1552993ecd71-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-2jt6j\" (UID: \"82fed0ed-2731-404f-a5f7-1552993ecd71\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-2jt6j" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.863052 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/2e845173-60d6-4bbb-a479-e752d55a4d7d-console-oauth-config\") pod \"console-f9d7485db-ds894\" (UID: \"2e845173-60d6-4bbb-a479-e752d55a4d7d\") " pod="openshift-console/console-f9d7485db-ds894" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.863584 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/0267e024-1c4b-49cd-b1b3-f4afe91bbbfa-machine-approver-tls\") pod \"machine-approver-56656f9798-cvn6z\" (UID: \"0267e024-1c4b-49cd-b1b3-f4afe91bbbfa\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cvn6z" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.865149 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/2e845173-60d6-4bbb-a479-e752d55a4d7d-console-serving-cert\") pod \"console-f9d7485db-ds894\" (UID: \"2e845173-60d6-4bbb-a479-e752d55a4d7d\") " pod="openshift-console/console-f9d7485db-ds894" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.875983 
4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.894484 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.914515 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.935167 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.954808 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Jan 31 16:31:31 crc kubenswrapper[4769]: I0131 16:31:31.975068 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.015100 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.035103 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.054515 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.081614 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.094557 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.118369 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.135458 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.155116 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.174447 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.194911 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.215090 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.236028 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.282935 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Jan 31 
16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.296108 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.315562 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.335973 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.356209 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.375443 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.395331 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.416086 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.436126 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.463359 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.476173 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.496248 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.515772 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.535683 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.555830 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.575606 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.616819 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.616835 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.636041 4769 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.655968 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.696877 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.697107 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.715350 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.736019 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.756312 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.774151 4769 request.go:700] Waited for 1.019035s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-operator-lifecycle-manager/secrets?fieldSelector=metadata.name%3Dcatalog-operator-serving-cert&limit=500&resourceVersion=0 Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.776295 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.796573 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.816466 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.835717 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.856732 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.876577 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.898159 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.916445 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.935733 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.976821 4769 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Jan 31 16:31:32 crc kubenswrapper[4769]: I0131 16:31:32.985991 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pc8xt\" (UniqueName: \"kubernetes.io/projected/54adcb76-b278-4991-9999-d6b5f8c8c1d6-kube-api-access-pc8xt\") pod \"apiserver-7bbb656c7d-wlgn6\" (UID: \"54adcb76-b278-4991-9999-d6b5f8c8c1d6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlgn6" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.022977 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-85vkv\" (UniqueName: \"kubernetes.io/projected/9b644a0b-f7ef-40f5-8855-0264a224470e-kube-api-access-85vkv\") pod \"console-operator-58897d9998-v4t65\" (UID: \"9b644a0b-f7ef-40f5-8855-0264a224470e\") " pod="openshift-console-operator/console-operator-58897d9998-v4t65" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.042014 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z4mg6\" (UniqueName: \"kubernetes.io/projected/524dcf6e-3c5c-47db-9941-06a21f9a8194-kube-api-access-z4mg6\") pod \"openshift-config-operator-7777fb866f-v5mpd\" (UID: \"524dcf6e-3c5c-47db-9941-06a21f9a8194\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-v5mpd" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.063901 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mfvw2\" (UniqueName: \"kubernetes.io/projected/f1cfa945-9049-4fe1-bde7-549e1c5d0e39-kube-api-access-mfvw2\") pod \"route-controller-manager-6576b87f9c-8td6v\" (UID: \"f1cfa945-9049-4fe1-bde7-549e1c5d0e39\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8td6v" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.082866 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-st4pc\" (UniqueName: \"kubernetes.io/projected/1ecd290f-188e-4ff0-a52f-6286412a0b5a-kube-api-access-st4pc\") pod \"machine-api-operator-5694c8668f-qt6ps\" (UID: \"1ecd290f-188e-4ff0-a52f-6286412a0b5a\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-qt6ps" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.098645 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlgn6" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.102982 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jrpd9\" (UniqueName: \"kubernetes.io/projected/6f0058ec-2d51-4750-ba72-32b848e39402-kube-api-access-jrpd9\") pod \"apiserver-76f77b778f-pwzwp\" (UID: \"6f0058ec-2d51-4750-ba72-32b848e39402\") " pod="openshift-apiserver/apiserver-76f77b778f-pwzwp" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.117340 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.129340 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2bx4f\" (UniqueName: \"kubernetes.io/projected/1efa9307-bdd7-4ec9-ab59-32196c343838-kube-api-access-2bx4f\") pod \"controller-manager-879f6c89f-79bp9\" (UID: \"1efa9307-bdd7-4ec9-ab59-32196c343838\") " pod="openshift-controller-manager/controller-manager-879f6c89f-79bp9" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.131396 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-qt6ps" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.136354 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.149060 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-v4t65" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.155954 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.158964 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-79bp9" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.174335 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8td6v" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.177557 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.186247 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-v5mpd" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.197433 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.222130 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.236058 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.264623 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.276692 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.298006 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.316060 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.336346 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.359400 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.375649 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-pwzwp" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.389292 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.395001 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.415822 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.422970 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-wlgn6"] Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.436207 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.455296 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.474008 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlgn6" event={"ID":"54adcb76-b278-4991-9999-d6b5f8c8c1d6","Type":"ContainerStarted","Data":"9d47b81e940de8d1dd2a3ca76ca5ebe80c287c614a970023582aa619e138e64a"} Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.475092 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.495964 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.516836 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.536138 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.554884 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-pwzwp"] Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.556037 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Jan 31 16:31:33 crc kubenswrapper[4769]: W0131 16:31:33.565823 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6f0058ec_2d51_4750_ba72_32b848e39402.slice/crio-850b4ff67506d352866a06fc4ffc58cdbb1f059cc3dfbcf68c36c511ccb7a5b2 WatchSource:0}: Error finding container 850b4ff67506d352866a06fc4ffc58cdbb1f059cc3dfbcf68c36c511ccb7a5b2: Status 404 returned error can't find the container with id 850b4ff67506d352866a06fc4ffc58cdbb1f059cc3dfbcf68c36c511ccb7a5b2 Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.575791 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.595242 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.615568 4769 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.632583 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-qt6ps"] Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.641284 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.649424 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-v5mpd"] Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.649485 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-v4t65"] Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.655934 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Jan 31 16:31:33 crc kubenswrapper[4769]: W0131 16:31:33.660995 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1ecd290f_188e_4ff0_a52f_6286412a0b5a.slice/crio-cc4e3072517de066aca6016d6cb117f0d4534758060e93dbd623806c0fc72953 WatchSource:0}: Error finding container cc4e3072517de066aca6016d6cb117f0d4534758060e93dbd623806c0fc72953: Status 404 returned error can't find the container with id cc4e3072517de066aca6016d6cb117f0d4534758060e93dbd623806c0fc72953 Jan 31 16:31:33 crc kubenswrapper[4769]: W0131 16:31:33.663596 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod524dcf6e_3c5c_47db_9941_06a21f9a8194.slice/crio-ebd2abf4a0b43f329e67873fabe23cc19532c0e0549abb671b53ed4a81e112fa WatchSource:0}: Error finding container ebd2abf4a0b43f329e67873fabe23cc19532c0e0549abb671b53ed4a81e112fa: Status 404 returned error can't find the container with id ebd2abf4a0b43f329e67873fabe23cc19532c0e0549abb671b53ed4a81e112fa Jan 31 16:31:33 crc kubenswrapper[4769]: W0131 16:31:33.665884 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9b644a0b_f7ef_40f5_8855_0264a224470e.slice/crio-135a9fb772ebff0e8df891df3fad2dab6e3b5b90b83dcd910841a1cc9c7837a1 WatchSource:0}: Error finding container 135a9fb772ebff0e8df891df3fad2dab6e3b5b90b83dcd910841a1cc9c7837a1: Status 404 returned error can't find the container with id 135a9fb772ebff0e8df891df3fad2dab6e3b5b90b83dcd910841a1cc9c7837a1 Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.675450 4769 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.695019 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.696872 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-8td6v"] Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.698253 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-79bp9"] Jan 31 16:31:33 crc kubenswrapper[4769]: W0131 16:31:33.713855 4769 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1efa9307_bdd7_4ec9_ab59_32196c343838.slice/crio-4eb532f556a29102ff49af3d0a160233bcb17c835601fe63f3fb20e807cfd9f1 WatchSource:0}: Error finding container 4eb532f556a29102ff49af3d0a160233bcb17c835601fe63f3fb20e807cfd9f1: Status 404 returned error can't find the container with id 4eb532f556a29102ff49af3d0a160233bcb17c835601fe63f3fb20e807cfd9f1 Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.717124 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Jan 31 16:31:33 crc kubenswrapper[4769]: W0131 16:31:33.726222 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf1cfa945_9049_4fe1_bde7_549e1c5d0e39.slice/crio-788cc8bb7866e5f080995e45c21b9f1a7de64ba610775519633b6ca34de28799 WatchSource:0}: Error finding container 788cc8bb7866e5f080995e45c21b9f1a7de64ba610775519633b6ca34de28799: Status 404 returned error can't find the container with id 788cc8bb7866e5f080995e45c21b9f1a7de64ba610775519633b6ca34de28799 Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.735099 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.770700 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6bdbj\" (UniqueName: \"kubernetes.io/projected/82fed0ed-2731-404f-a5f7-1552993ecd71-kube-api-access-6bdbj\") pod \"multus-admission-controller-857f4d67dd-2jt6j\" (UID: \"82fed0ed-2731-404f-a5f7-1552993ecd71\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-2jt6j" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.774227 4769 request.go:700] Waited for 1.914593874s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/serviceaccounts/console/token Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.813042 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-btrln\" (UniqueName: \"kubernetes.io/projected/0267e024-1c4b-49cd-b1b3-f4afe91bbbfa-kube-api-access-btrln\") pod \"machine-approver-56656f9798-cvn6z\" (UID: \"0267e024-1c4b-49cd-b1b3-f4afe91bbbfa\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cvn6z" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.844337 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bd9cp\" (UniqueName: \"kubernetes.io/projected/2e845173-60d6-4bbb-a479-e752d55a4d7d-kube-api-access-bd9cp\") pod \"console-f9d7485db-ds894\" (UID: \"2e845173-60d6-4bbb-a479-e752d55a4d7d\") " pod="openshift-console/console-f9d7485db-ds894" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.908908 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-ds894" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.912132 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b04b6715-da98-41a2-a034-3ee53201f83d-service-ca-bundle\") pod \"router-default-5444994796-4lpmw\" (UID: \"b04b6715-da98-41a2-a034-3ee53201f83d\") " pod="openshift-ingress/router-default-5444994796-4lpmw" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.912189 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-h8nkx\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.912228 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/37adecb9-a5fd-4e61-869b-4a04ac424ac0-audit-policies\") pod \"oauth-openshift-558db77b4-h8nkx\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.912254 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/fd3d29b5-b209-40c6-9c46-521268f3c363-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-hhtgl\" (UID: \"fd3d29b5-b209-40c6-9c46-521268f3c363\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hhtgl" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.912304 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/fd3d29b5-b209-40c6-9c46-521268f3c363-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-hhtgl\" (UID: \"fd3d29b5-b209-40c6-9c46-521268f3c363\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hhtgl" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.912344 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/32e89aaf-9047-4609-a161-4329731e4b61-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-sppfg\" (UID: \"32e89aaf-9047-4609-a161-4329731e4b61\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sppfg" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.912371 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/4494904a-b7f5-4141-8a63-3360e03bc528-registry-certificates\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.912395 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-77rgj\" (UniqueName: \"kubernetes.io/projected/4494904a-b7f5-4141-8a63-3360e03bc528-kube-api-access-77rgj\") pod 
\"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.912417 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/b04b6715-da98-41a2-a034-3ee53201f83d-stats-auth\") pod \"router-default-5444994796-4lpmw\" (UID: \"b04b6715-da98-41a2-a034-3ee53201f83d\") " pod="openshift-ingress/router-default-5444994796-4lpmw" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.912460 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-h8nkx\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.912521 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/231d723e-117b-43d1-b664-4e364d7f5d42-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-rdqmx\" (UID: \"231d723e-117b-43d1-b664-4e364d7f5d42\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rdqmx" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.912554 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/4494904a-b7f5-4141-8a63-3360e03bc528-installation-pull-secrets\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.912595 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/23df6b36-16f8-4198-acc3-6130a4ea9ca8-images\") pod \"machine-config-operator-74547568cd-6q4jz\" (UID: \"23df6b36-16f8-4198-acc3-6130a4ea9ca8\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6q4jz" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.912630 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/23df6b36-16f8-4198-acc3-6130a4ea9ca8-proxy-tls\") pod \"machine-config-operator-74547568cd-6q4jz\" (UID: \"23df6b36-16f8-4198-acc3-6130a4ea9ca8\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6q4jz" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.912652 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b04b6715-da98-41a2-a034-3ee53201f83d-metrics-certs\") pod \"router-default-5444994796-4lpmw\" (UID: \"b04b6715-da98-41a2-a034-3ee53201f83d\") " pod="openshift-ingress/router-default-5444994796-4lpmw" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.912675 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/4494904a-b7f5-4141-8a63-3360e03bc528-registry-tls\") pod 
\"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.912700 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-h8nkx\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.912725 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-h8nkx\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.912770 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-h8nkx\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.912807 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d8633c7e-4af1-4cf2-ae96-4d91474f25e7-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-w6kkt\" (UID: \"d8633c7e-4af1-4cf2-ae96-4d91474f25e7\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-w6kkt" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.912829 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/231d723e-117b-43d1-b664-4e364d7f5d42-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-rdqmx\" (UID: \"231d723e-117b-43d1-b664-4e364d7f5d42\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rdqmx" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.912987 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d8633c7e-4af1-4cf2-ae96-4d91474f25e7-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-w6kkt\" (UID: \"d8633c7e-4af1-4cf2-ae96-4d91474f25e7\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-w6kkt" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.913041 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/23df6b36-16f8-4198-acc3-6130a4ea9ca8-auth-proxy-config\") pod \"machine-config-operator-74547568cd-6q4jz\" (UID: \"23df6b36-16f8-4198-acc3-6130a4ea9ca8\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6q4jz" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.913271 4769 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-h8nkx\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.913373 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d8633c7e-4af1-4cf2-ae96-4d91474f25e7-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-w6kkt\" (UID: \"d8633c7e-4af1-4cf2-ae96-4d91474f25e7\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-w6kkt" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.913485 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d2fm6\" (UniqueName: \"kubernetes.io/projected/ebf0349c-8283-4951-81be-3b3287372830-kube-api-access-d2fm6\") pod \"cluster-samples-operator-665b6dd947-m58s7\" (UID: \"ebf0349c-8283-4951-81be-3b3287372830\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-m58s7" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.913540 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-h8nkx\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.913649 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-h8nkx\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.913697 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/21023c4f-b727-436c-a0e4-c5798ecf85ae-serving-cert\") pod \"etcd-operator-b45778765-2vgpw\" (UID: \"21023c4f-b727-436c-a0e4-c5798ecf85ae\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2vgpw" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.914056 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ktntw\" (UniqueName: \"kubernetes.io/projected/21023c4f-b727-436c-a0e4-c5798ecf85ae-kube-api-access-ktntw\") pod \"etcd-operator-b45778765-2vgpw\" (UID: \"21023c4f-b727-436c-a0e4-c5798ecf85ae\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2vgpw" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.914205 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/4494904a-b7f5-4141-8a63-3360e03bc528-bound-sa-token\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:33 crc 
kubenswrapper[4769]: I0131 16:31:33.914248 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/21023c4f-b727-436c-a0e4-c5798ecf85ae-etcd-ca\") pod \"etcd-operator-b45778765-2vgpw\" (UID: \"21023c4f-b727-436c-a0e4-c5798ecf85ae\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2vgpw" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.914302 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/37adecb9-a5fd-4e61-869b-4a04ac424ac0-audit-dir\") pod \"oauth-openshift-558db77b4-h8nkx\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.914337 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/4494904a-b7f5-4141-8a63-3360e03bc528-ca-trust-extracted\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.914390 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xpqhf\" (UniqueName: \"kubernetes.io/projected/231d723e-117b-43d1-b664-4e364d7f5d42-kube-api-access-xpqhf\") pod \"kube-storage-version-migrator-operator-b67b599dd-rdqmx\" (UID: \"231d723e-117b-43d1-b664-4e364d7f5d42\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rdqmx" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.914426 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qgwdc\" (UniqueName: \"kubernetes.io/projected/32e89aaf-9047-4609-a161-4329731e4b61-kube-api-access-qgwdc\") pod \"machine-config-controller-84d6567774-sppfg\" (UID: \"32e89aaf-9047-4609-a161-4329731e4b61\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sppfg" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.914757 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-h8nkx\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.914790 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8zlmx\" (UniqueName: \"kubernetes.io/projected/23df6b36-16f8-4198-acc3-6130a4ea9ca8-kube-api-access-8zlmx\") pod \"machine-config-operator-74547568cd-6q4jz\" (UID: \"23df6b36-16f8-4198-acc3-6130a4ea9ca8\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6q4jz" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.914817 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/21023c4f-b727-436c-a0e4-c5798ecf85ae-config\") pod \"etcd-operator-b45778765-2vgpw\" (UID: \"21023c4f-b727-436c-a0e4-c5798ecf85ae\") " 
pod="openshift-etcd-operator/etcd-operator-b45778765-2vgpw" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.914872 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fd3d29b5-b209-40c6-9c46-521268f3c363-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-hhtgl\" (UID: \"fd3d29b5-b209-40c6-9c46-521268f3c363\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hhtgl" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.914903 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4494904a-b7f5-4141-8a63-3360e03bc528-trusted-ca\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.914926 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-h8nkx\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.915839 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/21023c4f-b727-436c-a0e4-c5798ecf85ae-etcd-service-ca\") pod \"etcd-operator-b45778765-2vgpw\" (UID: \"21023c4f-b727-436c-a0e4-c5798ecf85ae\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2vgpw" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.915944 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-glhk9\" (UniqueName: \"kubernetes.io/projected/fd3d29b5-b209-40c6-9c46-521268f3c363-kube-api-access-glhk9\") pod \"cluster-image-registry-operator-dc59b4c8b-hhtgl\" (UID: \"fd3d29b5-b209-40c6-9c46-521268f3c363\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hhtgl" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.916063 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/b04b6715-da98-41a2-a034-3ee53201f83d-default-certificate\") pod \"router-default-5444994796-4lpmw\" (UID: \"b04b6715-da98-41a2-a034-3ee53201f83d\") " pod="openshift-ingress/router-default-5444994796-4lpmw" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.916441 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/32e89aaf-9047-4609-a161-4329731e4b61-proxy-tls\") pod \"machine-config-controller-84d6567774-sppfg\" (UID: \"32e89aaf-9047-4609-a161-4329731e4b61\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sppfg" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.916750 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5sd8s\" (UniqueName: \"kubernetes.io/projected/b04b6715-da98-41a2-a034-3ee53201f83d-kube-api-access-5sd8s\") pod \"router-default-5444994796-4lpmw\" (UID: 
\"b04b6715-da98-41a2-a034-3ee53201f83d\") " pod="openshift-ingress/router-default-5444994796-4lpmw" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.916792 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/21023c4f-b727-436c-a0e4-c5798ecf85ae-etcd-client\") pod \"etcd-operator-b45778765-2vgpw\" (UID: \"21023c4f-b727-436c-a0e4-c5798ecf85ae\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2vgpw" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.916819 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-h8nkx\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.916845 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xxnwg\" (UniqueName: \"kubernetes.io/projected/37adecb9-a5fd-4e61-869b-4a04ac424ac0-kube-api-access-xxnwg\") pod \"oauth-openshift-558db77b4-h8nkx\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.916895 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/ebf0349c-8283-4951-81be-3b3287372830-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-m58s7\" (UID: \"ebf0349c-8283-4951-81be-3b3287372830\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-m58s7" Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.916931 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:33 crc kubenswrapper[4769]: E0131 16:31:33.917316 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-31 16:31:34.417272633 +0000 UTC m=+142.491441302 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vdcnf" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:33 crc kubenswrapper[4769]: I0131 16:31:33.928091 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-2jt6j" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.020310 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.020660 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/4494904a-b7f5-4141-8a63-3360e03bc528-bound-sa-token\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.020693 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vmvmn\" (UniqueName: \"kubernetes.io/projected/8f4fa765-6c0a-455a-8179-001a1b59ca21-kube-api-access-vmvmn\") pod \"dns-operator-744455d44c-gwhw8\" (UID: \"8f4fa765-6c0a-455a-8179-001a1b59ca21\") " pod="openshift-dns-operator/dns-operator-744455d44c-gwhw8" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.020720 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/4494904a-b7f5-4141-8a63-3360e03bc528-ca-trust-extracted\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.020740 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xpqhf\" (UniqueName: \"kubernetes.io/projected/231d723e-117b-43d1-b664-4e364d7f5d42-kube-api-access-xpqhf\") pod \"kube-storage-version-migrator-operator-b67b599dd-rdqmx\" (UID: \"231d723e-117b-43d1-b664-4e364d7f5d42\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rdqmx" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.020759 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qgwdc\" (UniqueName: \"kubernetes.io/projected/32e89aaf-9047-4609-a161-4329731e4b61-kube-api-access-qgwdc\") pod \"machine-config-controller-84d6567774-sppfg\" (UID: \"32e89aaf-9047-4609-a161-4329731e4b61\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sppfg" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.020778 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/a7004082-a835-4b0c-8ec7-774e94c2cdc5-socket-dir\") pod \"csi-hostpathplugin-4pc7f\" (UID: \"a7004082-a835-4b0c-8ec7-774e94c2cdc5\") " pod="hostpath-provisioner/csi-hostpathplugin-4pc7f" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.020799 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8zlmx\" (UniqueName: \"kubernetes.io/projected/23df6b36-16f8-4198-acc3-6130a4ea9ca8-kube-api-access-8zlmx\") pod \"machine-config-operator-74547568cd-6q4jz\" (UID: \"23df6b36-16f8-4198-acc3-6130a4ea9ca8\") " 
pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6q4jz" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.020824 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/21023c4f-b727-436c-a0e4-c5798ecf85ae-config\") pod \"etcd-operator-b45778765-2vgpw\" (UID: \"21023c4f-b727-436c-a0e4-c5798ecf85ae\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2vgpw" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.020845 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-h8nkx\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.020883 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-499ck\" (UniqueName: \"kubernetes.io/projected/3e62e4ba-8115-4140-b8de-07edd8c6fcfd-kube-api-access-499ck\") pod \"control-plane-machine-set-operator-78cbb6b69f-wx75k\" (UID: \"3e62e4ba-8115-4140-b8de-07edd8c6fcfd\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-wx75k" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.020907 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-h8nkx\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.020928 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fd3d29b5-b209-40c6-9c46-521268f3c363-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-hhtgl\" (UID: \"fd3d29b5-b209-40c6-9c46-521268f3c363\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hhtgl" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.020948 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f510600b-438d-4f61-970a-96d7a27e79c3-config\") pod \"kube-controller-manager-operator-78b949d7b-9jflr\" (UID: \"f510600b-438d-4f61-970a-96d7a27e79c3\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9jflr" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.020968 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/680ae683-4028-4d53-a078-c8c3d357f5ff-webhook-cert\") pod \"packageserver-d55dfcdfc-6gsx5\" (UID: \"680ae683-4028-4d53-a078-c8c3d357f5ff\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6gsx5" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.020989 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/119207e3-9106-4692-b4f1-1729e0c567cb-service-ca-bundle\") pod \"authentication-operator-69f744f599-nxr6j\" (UID: 
\"119207e3-9106-4692-b4f1-1729e0c567cb\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-nxr6j" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.021006 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8af0e848-a4c4-4006-9ccb-3440c5da7fc8-config-volume\") pod \"dns-default-x87ps\" (UID: \"8af0e848-a4c4-4006-9ccb-3440c5da7fc8\") " pod="openshift-dns/dns-default-x87ps" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.021031 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/21023c4f-b727-436c-a0e4-c5798ecf85ae-etcd-service-ca\") pod \"etcd-operator-b45778765-2vgpw\" (UID: \"21023c4f-b727-436c-a0e4-c5798ecf85ae\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2vgpw" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.021048 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-glhk9\" (UniqueName: \"kubernetes.io/projected/fd3d29b5-b209-40c6-9c46-521268f3c363-kube-api-access-glhk9\") pod \"cluster-image-registry-operator-dc59b4c8b-hhtgl\" (UID: \"fd3d29b5-b209-40c6-9c46-521268f3c363\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hhtgl" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.021069 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/b04b6715-da98-41a2-a034-3ee53201f83d-default-certificate\") pod \"router-default-5444994796-4lpmw\" (UID: \"b04b6715-da98-41a2-a034-3ee53201f83d\") " pod="openshift-ingress/router-default-5444994796-4lpmw" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.021095 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4vkhs\" (UniqueName: \"kubernetes.io/projected/b962d703-4157-4cca-bdbd-107a6fd0d049-kube-api-access-4vkhs\") pod \"migrator-59844c95c7-p7926\" (UID: \"b962d703-4157-4cca-bdbd-107a6fd0d049\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-p7926" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.021131 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d1bd5479-04b5-4b15-9f75-69b4d3209113-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-4nngf\" (UID: \"d1bd5479-04b5-4b15-9f75-69b4d3209113\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4nngf" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.021151 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a701274a-c195-4938-96dd-9b7b7629ba16-metrics-tls\") pod \"ingress-operator-5b745b69d9-nbvrj\" (UID: \"a701274a-c195-4938-96dd-9b7b7629ba16\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nbvrj" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.021170 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f510600b-438d-4f61-970a-96d7a27e79c3-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-9jflr\" (UID: \"f510600b-438d-4f61-970a-96d7a27e79c3\") " 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9jflr" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.021185 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/8af0e848-a4c4-4006-9ccb-3440c5da7fc8-metrics-tls\") pod \"dns-default-x87ps\" (UID: \"8af0e848-a4c4-4006-9ccb-3440c5da7fc8\") " pod="openshift-dns/dns-default-x87ps" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.021201 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/32e89aaf-9047-4609-a161-4329731e4b61-proxy-tls\") pod \"machine-config-controller-84d6567774-sppfg\" (UID: \"32e89aaf-9047-4609-a161-4329731e4b61\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sppfg" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.021218 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-h8nkx\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.021237 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/aecc20b3-16e2-4d56-93ec-2c62b4a45e56-config-volume\") pod \"collect-profiles-29497950-gf6hx\" (UID: \"aecc20b3-16e2-4d56-93ec-2c62b4a45e56\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29497950-gf6hx" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.021253 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/aecc20b3-16e2-4d56-93ec-2c62b4a45e56-secret-volume\") pod \"collect-profiles-29497950-gf6hx\" (UID: \"aecc20b3-16e2-4d56-93ec-2c62b4a45e56\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29497950-gf6hx" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.021280 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/52ee678e-0f7b-4106-a203-534f9fedf88f-srv-cert\") pod \"olm-operator-6b444d44fb-sn9v6\" (UID: \"52ee678e-0f7b-4106-a203-534f9fedf88f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sn9v6" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.021303 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/5007e4f2-7ccc-4511-95fe-582dee0a5a53-signing-cabundle\") pod \"service-ca-9c57cc56f-b4pjt\" (UID: \"5007e4f2-7ccc-4511-95fe-582dee0a5a53\") " pod="openshift-service-ca/service-ca-9c57cc56f-b4pjt" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.021326 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b04b6715-da98-41a2-a034-3ee53201f83d-service-ca-bundle\") pod \"router-default-5444994796-4lpmw\" (UID: \"b04b6715-da98-41a2-a034-3ee53201f83d\") " pod="openshift-ingress/router-default-5444994796-4lpmw" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.021345 4769 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d1bd5479-04b5-4b15-9f75-69b4d3209113-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-4nngf\" (UID: \"d1bd5479-04b5-4b15-9f75-69b4d3209113\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4nngf" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.021362 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/a7004082-a835-4b0c-8ec7-774e94c2cdc5-mountpoint-dir\") pod \"csi-hostpathplugin-4pc7f\" (UID: \"a7004082-a835-4b0c-8ec7-774e94c2cdc5\") " pod="hostpath-provisioner/csi-hostpathplugin-4pc7f" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.021412 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/32e89aaf-9047-4609-a161-4329731e4b61-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-sppfg\" (UID: \"32e89aaf-9047-4609-a161-4329731e4b61\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sppfg" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.021431 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/b04b6715-da98-41a2-a034-3ee53201f83d-stats-auth\") pod \"router-default-5444994796-4lpmw\" (UID: \"b04b6715-da98-41a2-a034-3ee53201f83d\") " pod="openshift-ingress/router-default-5444994796-4lpmw" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.021451 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z6n92\" (UniqueName: \"kubernetes.io/projected/aecc20b3-16e2-4d56-93ec-2c62b4a45e56-kube-api-access-z6n92\") pod \"collect-profiles-29497950-gf6hx\" (UID: \"aecc20b3-16e2-4d56-93ec-2c62b4a45e56\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29497950-gf6hx" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.021472 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/f218882d-d7ef-4522-aacd-e37b8264eb56-node-bootstrap-token\") pod \"machine-config-server-vd8mb\" (UID: \"f218882d-d7ef-4522-aacd-e37b8264eb56\") " pod="openshift-machine-config-operator/machine-config-server-vd8mb" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.021520 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2z9vp\" (UniqueName: \"kubernetes.io/projected/8af0e848-a4c4-4006-9ccb-3440c5da7fc8-kube-api-access-2z9vp\") pod \"dns-default-x87ps\" (UID: \"8af0e848-a4c4-4006-9ccb-3440c5da7fc8\") " pod="openshift-dns/dns-default-x87ps" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.021554 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-h8nkx\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.021578 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" 
(UniqueName: \"kubernetes.io/configmap/231d723e-117b-43d1-b664-4e364d7f5d42-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-rdqmx\" (UID: \"231d723e-117b-43d1-b664-4e364d7f5d42\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rdqmx" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.021599 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/5007e4f2-7ccc-4511-95fe-582dee0a5a53-signing-key\") pod \"service-ca-9c57cc56f-b4pjt\" (UID: \"5007e4f2-7ccc-4511-95fe-582dee0a5a53\") " pod="openshift-service-ca/service-ca-9c57cc56f-b4pjt" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.021672 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/cbd22ccf-d447-4c9a-80bb-5d71f8725173-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-c5wrc\" (UID: \"cbd22ccf-d447-4c9a-80bb-5d71f8725173\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-c5wrc" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.021489 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/4494904a-b7f5-4141-8a63-3360e03bc528-ca-trust-extracted\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.022109 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/23df6b36-16f8-4198-acc3-6130a4ea9ca8-images\") pod \"machine-config-operator-74547568cd-6q4jz\" (UID: \"23df6b36-16f8-4198-acc3-6130a4ea9ca8\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6q4jz" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.022131 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/23df6b36-16f8-4198-acc3-6130a4ea9ca8-proxy-tls\") pod \"machine-config-operator-74547568cd-6q4jz\" (UID: \"23df6b36-16f8-4198-acc3-6130a4ea9ca8\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6q4jz" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.022150 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-knvkv\" (UniqueName: \"kubernetes.io/projected/a7004082-a835-4b0c-8ec7-774e94c2cdc5-kube-api-access-knvkv\") pod \"csi-hostpathplugin-4pc7f\" (UID: \"a7004082-a835-4b0c-8ec7-774e94c2cdc5\") " pod="hostpath-provisioner/csi-hostpathplugin-4pc7f" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.022169 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f29e610f-b8be-455d-8d71-bdc29b177f27-profile-collector-cert\") pod \"catalog-operator-68c6474976-57kt2\" (UID: \"f29e610f-b8be-455d-8d71-bdc29b177f27\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-57kt2" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.022193 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/82336871-6c99-4ee1-9fad-2eef02dc232b-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-5xmz7\" (UID: \"82336871-6c99-4ee1-9fad-2eef02dc232b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5xmz7" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.022211 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a701274a-c195-4938-96dd-9b7b7629ba16-bound-sa-token\") pod \"ingress-operator-5b745b69d9-nbvrj\" (UID: \"a701274a-c195-4938-96dd-9b7b7629ba16\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nbvrj" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.022227 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-szfxz\" (UniqueName: \"kubernetes.io/projected/cbd22ccf-d447-4c9a-80bb-5d71f8725173-kube-api-access-szfxz\") pod \"package-server-manager-789f6589d5-c5wrc\" (UID: \"cbd22ccf-d447-4c9a-80bb-5d71f8725173\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-c5wrc" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.022249 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-h8nkx\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.022272 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-h8nkx\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.022289 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-h8nkx\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.022675 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cm8fm\" (UniqueName: \"kubernetes.io/projected/119207e3-9106-4692-b4f1-1729e0c567cb-kube-api-access-cm8fm\") pod \"authentication-operator-69f744f599-nxr6j\" (UID: \"119207e3-9106-4692-b4f1-1729e0c567cb\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-nxr6j" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.022710 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d1bd5479-04b5-4b15-9f75-69b4d3209113-config\") pod \"kube-apiserver-operator-766d6c64bb-4nngf\" (UID: \"d1bd5479-04b5-4b15-9f75-69b4d3209113\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4nngf" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.022741 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"config\" (UniqueName: \"kubernetes.io/configmap/d8633c7e-4af1-4cf2-ae96-4d91474f25e7-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-w6kkt\" (UID: \"d8633c7e-4af1-4cf2-ae96-4d91474f25e7\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-w6kkt" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.022755 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/a7004082-a835-4b0c-8ec7-774e94c2cdc5-registration-dir\") pod \"csi-hostpathplugin-4pc7f\" (UID: \"a7004082-a835-4b0c-8ec7-774e94c2cdc5\") " pod="hostpath-provisioner/csi-hostpathplugin-4pc7f" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.022774 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d8633c7e-4af1-4cf2-ae96-4d91474f25e7-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-w6kkt\" (UID: \"d8633c7e-4af1-4cf2-ae96-4d91474f25e7\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-w6kkt" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.022794 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dlwkt\" (UniqueName: \"kubernetes.io/projected/52ee678e-0f7b-4106-a203-534f9fedf88f-kube-api-access-dlwkt\") pod \"olm-operator-6b444d44fb-sn9v6\" (UID: \"52ee678e-0f7b-4106-a203-534f9fedf88f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sn9v6" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.022810 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fvc4m\" (UniqueName: \"kubernetes.io/projected/f29e610f-b8be-455d-8d71-bdc29b177f27-kube-api-access-fvc4m\") pod \"catalog-operator-68c6474976-57kt2\" (UID: \"f29e610f-b8be-455d-8d71-bdc29b177f27\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-57kt2" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.022827 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/23df6b36-16f8-4198-acc3-6130a4ea9ca8-auth-proxy-config\") pod \"machine-config-operator-74547568cd-6q4jz\" (UID: \"23df6b36-16f8-4198-acc3-6130a4ea9ca8\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6q4jz" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.022843 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d8633c7e-4af1-4cf2-ae96-4d91474f25e7-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-w6kkt\" (UID: \"d8633c7e-4af1-4cf2-ae96-4d91474f25e7\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-w6kkt" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.022963 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d2fm6\" (UniqueName: \"kubernetes.io/projected/ebf0349c-8283-4951-81be-3b3287372830-kube-api-access-d2fm6\") pod \"cluster-samples-operator-665b6dd947-m58s7\" (UID: \"ebf0349c-8283-4951-81be-3b3287372830\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-m58s7" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.023260 4769 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-h8nkx\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.024188 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/231d723e-117b-43d1-b664-4e364d7f5d42-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-rdqmx\" (UID: \"231d723e-117b-43d1-b664-4e364d7f5d42\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rdqmx" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.024882 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-h8nkx\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.026319 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/21023c4f-b727-436c-a0e4-c5798ecf85ae-etcd-service-ca\") pod \"etcd-operator-b45778765-2vgpw\" (UID: \"21023c4f-b727-436c-a0e4-c5798ecf85ae\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2vgpw" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.026978 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/23df6b36-16f8-4198-acc3-6130a4ea9ca8-images\") pod \"machine-config-operator-74547568cd-6q4jz\" (UID: \"23df6b36-16f8-4198-acc3-6130a4ea9ca8\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6q4jz" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.022245 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/21023c4f-b727-436c-a0e4-c5798ecf85ae-config\") pod \"etcd-operator-b45778765-2vgpw\" (UID: \"21023c4f-b727-436c-a0e4-c5798ecf85ae\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2vgpw" Jan 31 16:31:34 crc kubenswrapper[4769]: E0131 16:31:34.027778 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:31:34.527744138 +0000 UTC m=+142.601912807 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.027957 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-h8nkx\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.028595 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-96jgg\" (UniqueName: \"kubernetes.io/projected/c2af561a-0833-4624-a950-1e1bfb2bacaa-kube-api-access-96jgg\") pod \"downloads-7954f5f757-g2568\" (UID: \"c2af561a-0833-4624-a950-1e1bfb2bacaa\") " pod="openshift-console/downloads-7954f5f757-g2568" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.028642 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7d6k6\" (UniqueName: \"kubernetes.io/projected/f218882d-d7ef-4522-aacd-e37b8264eb56-kube-api-access-7d6k6\") pod \"machine-config-server-vd8mb\" (UID: \"f218882d-d7ef-4522-aacd-e37b8264eb56\") " pod="openshift-machine-config-operator/machine-config-server-vd8mb" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.028703 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e3acf0c2-2cd0-4415-ab96-b3cf283af01a-cert\") pod \"ingress-canary-9vxrh\" (UID: \"e3acf0c2-2cd0-4415-ab96-b3cf283af01a\") " pod="openshift-ingress-canary/ingress-canary-9vxrh" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.028740 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/23df6b36-16f8-4198-acc3-6130a4ea9ca8-auth-proxy-config\") pod \"machine-config-operator-74547568cd-6q4jz\" (UID: \"23df6b36-16f8-4198-acc3-6130a4ea9ca8\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6q4jz" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.028764 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d8633c7e-4af1-4cf2-ae96-4d91474f25e7-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-w6kkt\" (UID: \"d8633c7e-4af1-4cf2-ae96-4d91474f25e7\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-w6kkt" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.028814 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/21023c4f-b727-436c-a0e4-c5798ecf85ae-etcd-ca\") pod \"etcd-operator-b45778765-2vgpw\" (UID: \"21023c4f-b727-436c-a0e4-c5798ecf85ae\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2vgpw" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.029270 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: 
\"kubernetes.io/configmap/21023c4f-b727-436c-a0e4-c5798ecf85ae-etcd-ca\") pod \"etcd-operator-b45778765-2vgpw\" (UID: \"21023c4f-b727-436c-a0e4-c5798ecf85ae\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2vgpw" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.029322 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/37adecb9-a5fd-4e61-869b-4a04ac424ac0-audit-dir\") pod \"oauth-openshift-558db77b4-h8nkx\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.029361 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/37adecb9-a5fd-4e61-869b-4a04ac424ac0-audit-dir\") pod \"oauth-openshift-558db77b4-h8nkx\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.029442 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ae589818-e1eb-471e-ae20-018136cd7868-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-5zm7b\" (UID: \"ae589818-e1eb-471e-ae20-018136cd7868\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5zm7b" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.029763 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/52ee678e-0f7b-4106-a203-534f9fedf88f-profile-collector-cert\") pod \"olm-operator-6b444d44fb-sn9v6\" (UID: \"52ee678e-0f7b-4106-a203-534f9fedf88f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sn9v6" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.029795 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ae589818-e1eb-471e-ae20-018136cd7868-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-5zm7b\" (UID: \"ae589818-e1eb-471e-ae20-018136cd7868\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5zm7b" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.029823 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4494904a-b7f5-4141-8a63-3360e03bc528-trusted-ca\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.029846 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r5g95\" (UniqueName: \"kubernetes.io/projected/5007e4f2-7ccc-4511-95fe-582dee0a5a53-kube-api-access-r5g95\") pod \"service-ca-9c57cc56f-b4pjt\" (UID: \"5007e4f2-7ccc-4511-95fe-582dee0a5a53\") " pod="openshift-service-ca/service-ca-9c57cc56f-b4pjt" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.029879 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-system-serving-cert\") pod 
\"oauth-openshift-558db77b4-h8nkx\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.029886 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4f9wx\" (UniqueName: \"kubernetes.io/projected/ae589818-e1eb-471e-ae20-018136cd7868-kube-api-access-4f9wx\") pod \"openshift-controller-manager-operator-756b6f6bc6-5zm7b\" (UID: \"ae589818-e1eb-471e-ae20-018136cd7868\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5zm7b" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.030038 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-79tkh\" (UniqueName: \"kubernetes.io/projected/e3acf0c2-2cd0-4415-ab96-b3cf283af01a-kube-api-access-79tkh\") pod \"ingress-canary-9vxrh\" (UID: \"e3acf0c2-2cd0-4415-ab96-b3cf283af01a\") " pod="openshift-ingress-canary/ingress-canary-9vxrh" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.030071 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82336871-6c99-4ee1-9fad-2eef02dc232b-config\") pod \"openshift-apiserver-operator-796bbdcf4f-5xmz7\" (UID: \"82336871-6c99-4ee1-9fad-2eef02dc232b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5xmz7" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.030116 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-h8nkx\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.030315 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fd3d29b5-b209-40c6-9c46-521268f3c363-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-hhtgl\" (UID: \"fd3d29b5-b209-40c6-9c46-521268f3c363\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hhtgl" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.030796 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/3e62e4ba-8115-4140-b8de-07edd8c6fcfd-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-wx75k\" (UID: \"3e62e4ba-8115-4140-b8de-07edd8c6fcfd\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-wx75k" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.031185 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/8f4fa765-6c0a-455a-8179-001a1b59ca21-metrics-tls\") pod \"dns-operator-744455d44c-gwhw8\" (UID: \"8f4fa765-6c0a-455a-8179-001a1b59ca21\") " pod="openshift-dns-operator/dns-operator-744455d44c-gwhw8" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.031425 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: 
\"kubernetes.io/host-path/a7004082-a835-4b0c-8ec7-774e94c2cdc5-plugins-dir\") pod \"csi-hostpathplugin-4pc7f\" (UID: \"a7004082-a835-4b0c-8ec7-774e94c2cdc5\") " pod="hostpath-provisioner/csi-hostpathplugin-4pc7f" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.031457 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mx2j4\" (UniqueName: \"kubernetes.io/projected/82336871-6c99-4ee1-9fad-2eef02dc232b-kube-api-access-mx2j4\") pod \"openshift-apiserver-operator-796bbdcf4f-5xmz7\" (UID: \"82336871-6c99-4ee1-9fad-2eef02dc232b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5xmz7" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.031534 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f510600b-438d-4f61-970a-96d7a27e79c3-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-9jflr\" (UID: \"f510600b-438d-4f61-970a-96d7a27e79c3\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9jflr" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.031578 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/680ae683-4028-4d53-a078-c8c3d357f5ff-apiservice-cert\") pod \"packageserver-d55dfcdfc-6gsx5\" (UID: \"680ae683-4028-4d53-a078-c8c3d357f5ff\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6gsx5" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.031780 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5sd8s\" (UniqueName: \"kubernetes.io/projected/b04b6715-da98-41a2-a034-3ee53201f83d-kube-api-access-5sd8s\") pod \"router-default-5444994796-4lpmw\" (UID: \"b04b6715-da98-41a2-a034-3ee53201f83d\") " pod="openshift-ingress/router-default-5444994796-4lpmw" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.031848 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/21023c4f-b727-436c-a0e4-c5798ecf85ae-etcd-client\") pod \"etcd-operator-b45778765-2vgpw\" (UID: \"21023c4f-b727-436c-a0e4-c5798ecf85ae\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2vgpw" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.031875 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xxnwg\" (UniqueName: \"kubernetes.io/projected/37adecb9-a5fd-4e61-869b-4a04ac424ac0-kube-api-access-xxnwg\") pod \"oauth-openshift-558db77b4-h8nkx\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.033174 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-h8nkx\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.033579 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/ebf0349c-8283-4951-81be-3b3287372830-samples-operator-tls\") pod 
\"cluster-samples-operator-665b6dd947-m58s7\" (UID: \"ebf0349c-8283-4951-81be-3b3287372830\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-m58s7" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.033648 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/c57a349c-3bb6-4a77-8a0a-59683f544d6d-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-59jw4\" (UID: \"c57a349c-3bb6-4a77-8a0a-59683f544d6d\") " pod="openshift-marketplace/marketplace-operator-79b997595-59jw4" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.033673 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a701274a-c195-4938-96dd-9b7b7629ba16-trusted-ca\") pod \"ingress-operator-5b745b69d9-nbvrj\" (UID: \"a701274a-c195-4938-96dd-9b7b7629ba16\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nbvrj" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.033939 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-h8nkx\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.034094 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-h8nkx\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.034268 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4494904a-b7f5-4141-8a63-3360e03bc528-trusted-ca\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.034331 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/37adecb9-a5fd-4e61-869b-4a04ac424ac0-audit-policies\") pod \"oauth-openshift-558db77b4-h8nkx\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.034390 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/fd3d29b5-b209-40c6-9c46-521268f3c363-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-hhtgl\" (UID: \"fd3d29b5-b209-40c6-9c46-521268f3c363\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hhtgl" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.034817 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/37adecb9-a5fd-4e61-869b-4a04ac424ac0-audit-policies\") pod \"oauth-openshift-558db77b4-h8nkx\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.034863 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/a7004082-a835-4b0c-8ec7-774e94c2cdc5-csi-data-dir\") pod \"csi-hostpathplugin-4pc7f\" (UID: \"a7004082-a835-4b0c-8ec7-774e94c2cdc5\") " pod="hostpath-provisioner/csi-hostpathplugin-4pc7f" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.036928 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/32e89aaf-9047-4609-a161-4329731e4b61-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-sppfg\" (UID: \"32e89aaf-9047-4609-a161-4329731e4b61\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sppfg" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.038242 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b04b6715-da98-41a2-a034-3ee53201f83d-service-ca-bundle\") pod \"router-default-5444994796-4lpmw\" (UID: \"b04b6715-da98-41a2-a034-3ee53201f83d\") " pod="openshift-ingress/router-default-5444994796-4lpmw" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.038525 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/b04b6715-da98-41a2-a034-3ee53201f83d-default-certificate\") pod \"router-default-5444994796-4lpmw\" (UID: \"b04b6715-da98-41a2-a034-3ee53201f83d\") " pod="openshift-ingress/router-default-5444994796-4lpmw" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.039181 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/fd3d29b5-b209-40c6-9c46-521268f3c363-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-hhtgl\" (UID: \"fd3d29b5-b209-40c6-9c46-521268f3c363\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hhtgl" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.039245 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/4494904a-b7f5-4141-8a63-3360e03bc528-registry-certificates\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.039270 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/b04b6715-da98-41a2-a034-3ee53201f83d-stats-auth\") pod \"router-default-5444994796-4lpmw\" (UID: \"b04b6715-da98-41a2-a034-3ee53201f83d\") " pod="openshift-ingress/router-default-5444994796-4lpmw" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.039273 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-77rgj\" (UniqueName: \"kubernetes.io/projected/4494904a-b7f5-4141-8a63-3360e03bc528-kube-api-access-77rgj\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.039333 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"config\" (UniqueName: \"kubernetes.io/configmap/da035601-cc1e-4442-9665-1f76bddceb51-config\") pod \"service-ca-operator-777779d784-gt8bb\" (UID: \"da035601-cc1e-4442-9665-1f76bddceb51\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-gt8bb" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.039740 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/4494904a-b7f5-4141-8a63-3360e03bc528-installation-pull-secrets\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.039806 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c57a349c-3bb6-4a77-8a0a-59683f544d6d-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-59jw4\" (UID: \"c57a349c-3bb6-4a77-8a0a-59683f544d6d\") " pod="openshift-marketplace/marketplace-operator-79b997595-59jw4" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.039830 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f29e610f-b8be-455d-8d71-bdc29b177f27-srv-cert\") pod \"catalog-operator-68c6474976-57kt2\" (UID: \"f29e610f-b8be-455d-8d71-bdc29b177f27\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-57kt2" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.039899 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/4494904a-b7f5-4141-8a63-3360e03bc528-registry-tls\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.039940 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b04b6715-da98-41a2-a034-3ee53201f83d-metrics-certs\") pod \"router-default-5444994796-4lpmw\" (UID: \"b04b6715-da98-41a2-a034-3ee53201f83d\") " pod="openshift-ingress/router-default-5444994796-4lpmw" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.039989 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/119207e3-9106-4692-b4f1-1729e0c567cb-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-nxr6j\" (UID: \"119207e3-9106-4692-b4f1-1729e0c567cb\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-nxr6j" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.040010 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/119207e3-9106-4692-b4f1-1729e0c567cb-serving-cert\") pod \"authentication-operator-69f744f599-nxr6j\" (UID: \"119207e3-9106-4692-b4f1-1729e0c567cb\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-nxr6j" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.040030 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/f218882d-d7ef-4522-aacd-e37b8264eb56-certs\") 
pod \"machine-config-server-vd8mb\" (UID: \"f218882d-d7ef-4522-aacd-e37b8264eb56\") " pod="openshift-machine-config-operator/machine-config-server-vd8mb" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.040060 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p7qks\" (UniqueName: \"kubernetes.io/projected/a701274a-c195-4938-96dd-9b7b7629ba16-kube-api-access-p7qks\") pod \"ingress-operator-5b745b69d9-nbvrj\" (UID: \"a701274a-c195-4938-96dd-9b7b7629ba16\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nbvrj" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.040079 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/119207e3-9106-4692-b4f1-1729e0c567cb-config\") pod \"authentication-operator-69f744f599-nxr6j\" (UID: \"119207e3-9106-4692-b4f1-1729e0c567cb\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-nxr6j" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.040105 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/231d723e-117b-43d1-b664-4e364d7f5d42-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-rdqmx\" (UID: \"231d723e-117b-43d1-b664-4e364d7f5d42\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rdqmx" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.040124 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5cqvh\" (UniqueName: \"kubernetes.io/projected/c57a349c-3bb6-4a77-8a0a-59683f544d6d-kube-api-access-5cqvh\") pod \"marketplace-operator-79b997595-59jw4\" (UID: \"c57a349c-3bb6-4a77-8a0a-59683f544d6d\") " pod="openshift-marketplace/marketplace-operator-79b997595-59jw4" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.040145 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-trnx2\" (UniqueName: \"kubernetes.io/projected/680ae683-4028-4d53-a078-c8c3d357f5ff-kube-api-access-trnx2\") pod \"packageserver-d55dfcdfc-6gsx5\" (UID: \"680ae683-4028-4d53-a078-c8c3d357f5ff\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6gsx5" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.040212 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-h8nkx\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.040241 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bbctb\" (UniqueName: \"kubernetes.io/projected/da035601-cc1e-4442-9665-1f76bddceb51-kube-api-access-bbctb\") pod \"service-ca-operator-777779d784-gt8bb\" (UID: \"da035601-cc1e-4442-9665-1f76bddceb51\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-gt8bb" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.040266 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: 
\"kubernetes.io/empty-dir/680ae683-4028-4d53-a078-c8c3d357f5ff-tmpfs\") pod \"packageserver-d55dfcdfc-6gsx5\" (UID: \"680ae683-4028-4d53-a078-c8c3d357f5ff\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6gsx5" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.040290 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-h8nkx\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.040312 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/21023c4f-b727-436c-a0e4-c5798ecf85ae-serving-cert\") pod \"etcd-operator-b45778765-2vgpw\" (UID: \"21023c4f-b727-436c-a0e4-c5798ecf85ae\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2vgpw" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.040334 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ktntw\" (UniqueName: \"kubernetes.io/projected/21023c4f-b727-436c-a0e4-c5798ecf85ae-kube-api-access-ktntw\") pod \"etcd-operator-b45778765-2vgpw\" (UID: \"21023c4f-b727-436c-a0e4-c5798ecf85ae\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2vgpw" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.040352 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/da035601-cc1e-4442-9665-1f76bddceb51-serving-cert\") pod \"service-ca-operator-777779d784-gt8bb\" (UID: \"da035601-cc1e-4442-9665-1f76bddceb51\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-gt8bb" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.040938 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/4494904a-b7f5-4141-8a63-3360e03bc528-registry-certificates\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.041003 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d8633c7e-4af1-4cf2-ae96-4d91474f25e7-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-w6kkt\" (UID: \"d8633c7e-4af1-4cf2-ae96-4d91474f25e7\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-w6kkt" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.041587 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-h8nkx\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.052035 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-h8nkx\" (UID: 
\"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.053219 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/32e89aaf-9047-4609-a161-4329731e4b61-proxy-tls\") pod \"machine-config-controller-84d6567774-sppfg\" (UID: \"32e89aaf-9047-4609-a161-4329731e4b61\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sppfg" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.055069 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/23df6b36-16f8-4198-acc3-6130a4ea9ca8-proxy-tls\") pod \"machine-config-operator-74547568cd-6q4jz\" (UID: \"23df6b36-16f8-4198-acc3-6130a4ea9ca8\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6q4jz" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.060299 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b04b6715-da98-41a2-a034-3ee53201f83d-metrics-certs\") pod \"router-default-5444994796-4lpmw\" (UID: \"b04b6715-da98-41a2-a034-3ee53201f83d\") " pod="openshift-ingress/router-default-5444994796-4lpmw" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.061130 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/4494904a-b7f5-4141-8a63-3360e03bc528-registry-tls\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.061456 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/21023c4f-b727-436c-a0e4-c5798ecf85ae-serving-cert\") pod \"etcd-operator-b45778765-2vgpw\" (UID: \"21023c4f-b727-436c-a0e4-c5798ecf85ae\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2vgpw" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.061999 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/fd3d29b5-b209-40c6-9c46-521268f3c363-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-hhtgl\" (UID: \"fd3d29b5-b209-40c6-9c46-521268f3c363\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hhtgl" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.062627 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-h8nkx\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.064186 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-h8nkx\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.064984 4769 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/4494904a-b7f5-4141-8a63-3360e03bc528-installation-pull-secrets\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.065046 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-h8nkx\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.065563 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/ebf0349c-8283-4951-81be-3b3287372830-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-m58s7\" (UID: \"ebf0349c-8283-4951-81be-3b3287372830\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-m58s7" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.074749 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/231d723e-117b-43d1-b664-4e364d7f5d42-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-rdqmx\" (UID: \"231d723e-117b-43d1-b664-4e364d7f5d42\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rdqmx" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.080636 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qgwdc\" (UniqueName: \"kubernetes.io/projected/32e89aaf-9047-4609-a161-4329731e4b61-kube-api-access-qgwdc\") pod \"machine-config-controller-84d6567774-sppfg\" (UID: \"32e89aaf-9047-4609-a161-4329731e4b61\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sppfg" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.086786 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/21023c4f-b727-436c-a0e4-c5798ecf85ae-etcd-client\") pod \"etcd-operator-b45778765-2vgpw\" (UID: \"21023c4f-b727-436c-a0e4-c5798ecf85ae\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2vgpw" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.094386 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cvn6z" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.094748 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xpqhf\" (UniqueName: \"kubernetes.io/projected/231d723e-117b-43d1-b664-4e364d7f5d42-kube-api-access-xpqhf\") pod \"kube-storage-version-migrator-operator-b67b599dd-rdqmx\" (UID: \"231d723e-117b-43d1-b664-4e364d7f5d42\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rdqmx" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.112060 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8zlmx\" (UniqueName: \"kubernetes.io/projected/23df6b36-16f8-4198-acc3-6130a4ea9ca8-kube-api-access-8zlmx\") pod \"machine-config-operator-74547568cd-6q4jz\" (UID: \"23df6b36-16f8-4198-acc3-6130a4ea9ca8\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6q4jz" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.115974 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/4494904a-b7f5-4141-8a63-3360e03bc528-bound-sa-token\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.132269 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-glhk9\" (UniqueName: \"kubernetes.io/projected/fd3d29b5-b209-40c6-9c46-521268f3c363-kube-api-access-glhk9\") pod \"cluster-image-registry-operator-dc59b4c8b-hhtgl\" (UID: \"fd3d29b5-b209-40c6-9c46-521268f3c363\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hhtgl" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.157334 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z6n92\" (UniqueName: \"kubernetes.io/projected/aecc20b3-16e2-4d56-93ec-2c62b4a45e56-kube-api-access-z6n92\") pod \"collect-profiles-29497950-gf6hx\" (UID: \"aecc20b3-16e2-4d56-93ec-2c62b4a45e56\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29497950-gf6hx" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.157399 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/f218882d-d7ef-4522-aacd-e37b8264eb56-node-bootstrap-token\") pod \"machine-config-server-vd8mb\" (UID: \"f218882d-d7ef-4522-aacd-e37b8264eb56\") " pod="openshift-machine-config-operator/machine-config-server-vd8mb" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.157427 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2z9vp\" (UniqueName: \"kubernetes.io/projected/8af0e848-a4c4-4006-9ccb-3440c5da7fc8-kube-api-access-2z9vp\") pod \"dns-default-x87ps\" (UID: \"8af0e848-a4c4-4006-9ccb-3440c5da7fc8\") " pod="openshift-dns/dns-default-x87ps" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.157449 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/5007e4f2-7ccc-4511-95fe-582dee0a5a53-signing-key\") pod \"service-ca-9c57cc56f-b4pjt\" (UID: \"5007e4f2-7ccc-4511-95fe-582dee0a5a53\") " pod="openshift-service-ca/service-ca-9c57cc56f-b4pjt" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 
16:31:34.157467 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/cbd22ccf-d447-4c9a-80bb-5d71f8725173-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-c5wrc\" (UID: \"cbd22ccf-d447-4c9a-80bb-5d71f8725173\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-c5wrc" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.157487 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-knvkv\" (UniqueName: \"kubernetes.io/projected/a7004082-a835-4b0c-8ec7-774e94c2cdc5-kube-api-access-knvkv\") pod \"csi-hostpathplugin-4pc7f\" (UID: \"a7004082-a835-4b0c-8ec7-774e94c2cdc5\") " pod="hostpath-provisioner/csi-hostpathplugin-4pc7f" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.157521 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f29e610f-b8be-455d-8d71-bdc29b177f27-profile-collector-cert\") pod \"catalog-operator-68c6474976-57kt2\" (UID: \"f29e610f-b8be-455d-8d71-bdc29b177f27\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-57kt2" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.157541 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/82336871-6c99-4ee1-9fad-2eef02dc232b-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-5xmz7\" (UID: \"82336871-6c99-4ee1-9fad-2eef02dc232b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5xmz7" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.157566 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a701274a-c195-4938-96dd-9b7b7629ba16-bound-sa-token\") pod \"ingress-operator-5b745b69d9-nbvrj\" (UID: \"a701274a-c195-4938-96dd-9b7b7629ba16\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nbvrj" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.157584 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-szfxz\" (UniqueName: \"kubernetes.io/projected/cbd22ccf-d447-4c9a-80bb-5d71f8725173-kube-api-access-szfxz\") pod \"package-server-manager-789f6589d5-c5wrc\" (UID: \"cbd22ccf-d447-4c9a-80bb-5d71f8725173\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-c5wrc" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.157606 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cm8fm\" (UniqueName: \"kubernetes.io/projected/119207e3-9106-4692-b4f1-1729e0c567cb-kube-api-access-cm8fm\") pod \"authentication-operator-69f744f599-nxr6j\" (UID: \"119207e3-9106-4692-b4f1-1729e0c567cb\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-nxr6j" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.157628 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d1bd5479-04b5-4b15-9f75-69b4d3209113-config\") pod \"kube-apiserver-operator-766d6c64bb-4nngf\" (UID: \"d1bd5479-04b5-4b15-9f75-69b4d3209113\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4nngf" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.157661 4769 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/a7004082-a835-4b0c-8ec7-774e94c2cdc5-registration-dir\") pod \"csi-hostpathplugin-4pc7f\" (UID: \"a7004082-a835-4b0c-8ec7-774e94c2cdc5\") " pod="hostpath-provisioner/csi-hostpathplugin-4pc7f" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.157683 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dlwkt\" (UniqueName: \"kubernetes.io/projected/52ee678e-0f7b-4106-a203-534f9fedf88f-kube-api-access-dlwkt\") pod \"olm-operator-6b444d44fb-sn9v6\" (UID: \"52ee678e-0f7b-4106-a203-534f9fedf88f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sn9v6" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.157707 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fvc4m\" (UniqueName: \"kubernetes.io/projected/f29e610f-b8be-455d-8d71-bdc29b177f27-kube-api-access-fvc4m\") pod \"catalog-operator-68c6474976-57kt2\" (UID: \"f29e610f-b8be-455d-8d71-bdc29b177f27\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-57kt2" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.157751 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-96jgg\" (UniqueName: \"kubernetes.io/projected/c2af561a-0833-4624-a950-1e1bfb2bacaa-kube-api-access-96jgg\") pod \"downloads-7954f5f757-g2568\" (UID: \"c2af561a-0833-4624-a950-1e1bfb2bacaa\") " pod="openshift-console/downloads-7954f5f757-g2568" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.157772 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7d6k6\" (UniqueName: \"kubernetes.io/projected/f218882d-d7ef-4522-aacd-e37b8264eb56-kube-api-access-7d6k6\") pod \"machine-config-server-vd8mb\" (UID: \"f218882d-d7ef-4522-aacd-e37b8264eb56\") " pod="openshift-machine-config-operator/machine-config-server-vd8mb" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.157791 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e3acf0c2-2cd0-4415-ab96-b3cf283af01a-cert\") pod \"ingress-canary-9vxrh\" (UID: \"e3acf0c2-2cd0-4415-ab96-b3cf283af01a\") " pod="openshift-ingress-canary/ingress-canary-9vxrh" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.157812 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ae589818-e1eb-471e-ae20-018136cd7868-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-5zm7b\" (UID: \"ae589818-e1eb-471e-ae20-018136cd7868\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5zm7b" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.157836 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/52ee678e-0f7b-4106-a203-534f9fedf88f-profile-collector-cert\") pod \"olm-operator-6b444d44fb-sn9v6\" (UID: \"52ee678e-0f7b-4106-a203-534f9fedf88f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sn9v6" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.157855 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ae589818-e1eb-471e-ae20-018136cd7868-serving-cert\") pod 
\"openshift-controller-manager-operator-756b6f6bc6-5zm7b\" (UID: \"ae589818-e1eb-471e-ae20-018136cd7868\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5zm7b" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.157876 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r5g95\" (UniqueName: \"kubernetes.io/projected/5007e4f2-7ccc-4511-95fe-582dee0a5a53-kube-api-access-r5g95\") pod \"service-ca-9c57cc56f-b4pjt\" (UID: \"5007e4f2-7ccc-4511-95fe-582dee0a5a53\") " pod="openshift-service-ca/service-ca-9c57cc56f-b4pjt" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.157894 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82336871-6c99-4ee1-9fad-2eef02dc232b-config\") pod \"openshift-apiserver-operator-796bbdcf4f-5xmz7\" (UID: \"82336871-6c99-4ee1-9fad-2eef02dc232b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5xmz7" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.157916 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4f9wx\" (UniqueName: \"kubernetes.io/projected/ae589818-e1eb-471e-ae20-018136cd7868-kube-api-access-4f9wx\") pod \"openshift-controller-manager-operator-756b6f6bc6-5zm7b\" (UID: \"ae589818-e1eb-471e-ae20-018136cd7868\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5zm7b" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.157935 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-79tkh\" (UniqueName: \"kubernetes.io/projected/e3acf0c2-2cd0-4415-ab96-b3cf283af01a-kube-api-access-79tkh\") pod \"ingress-canary-9vxrh\" (UID: \"e3acf0c2-2cd0-4415-ab96-b3cf283af01a\") " pod="openshift-ingress-canary/ingress-canary-9vxrh" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.157963 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/3e62e4ba-8115-4140-b8de-07edd8c6fcfd-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-wx75k\" (UID: \"3e62e4ba-8115-4140-b8de-07edd8c6fcfd\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-wx75k" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.157987 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/a7004082-a835-4b0c-8ec7-774e94c2cdc5-plugins-dir\") pod \"csi-hostpathplugin-4pc7f\" (UID: \"a7004082-a835-4b0c-8ec7-774e94c2cdc5\") " pod="hostpath-provisioner/csi-hostpathplugin-4pc7f" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.158003 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mx2j4\" (UniqueName: \"kubernetes.io/projected/82336871-6c99-4ee1-9fad-2eef02dc232b-kube-api-access-mx2j4\") pod \"openshift-apiserver-operator-796bbdcf4f-5xmz7\" (UID: \"82336871-6c99-4ee1-9fad-2eef02dc232b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5xmz7" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.158023 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/8f4fa765-6c0a-455a-8179-001a1b59ca21-metrics-tls\") pod \"dns-operator-744455d44c-gwhw8\" (UID: 
\"8f4fa765-6c0a-455a-8179-001a1b59ca21\") " pod="openshift-dns-operator/dns-operator-744455d44c-gwhw8" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.158041 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f510600b-438d-4f61-970a-96d7a27e79c3-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-9jflr\" (UID: \"f510600b-438d-4f61-970a-96d7a27e79c3\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9jflr" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.158068 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/680ae683-4028-4d53-a078-c8c3d357f5ff-apiservice-cert\") pod \"packageserver-d55dfcdfc-6gsx5\" (UID: \"680ae683-4028-4d53-a078-c8c3d357f5ff\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6gsx5" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.158104 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/c57a349c-3bb6-4a77-8a0a-59683f544d6d-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-59jw4\" (UID: \"c57a349c-3bb6-4a77-8a0a-59683f544d6d\") " pod="openshift-marketplace/marketplace-operator-79b997595-59jw4" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.158122 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a701274a-c195-4938-96dd-9b7b7629ba16-trusted-ca\") pod \"ingress-operator-5b745b69d9-nbvrj\" (UID: \"a701274a-c195-4938-96dd-9b7b7629ba16\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nbvrj" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.158142 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/a7004082-a835-4b0c-8ec7-774e94c2cdc5-csi-data-dir\") pod \"csi-hostpathplugin-4pc7f\" (UID: \"a7004082-a835-4b0c-8ec7-774e94c2cdc5\") " pod="hostpath-provisioner/csi-hostpathplugin-4pc7f" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.158184 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/da035601-cc1e-4442-9665-1f76bddceb51-config\") pod \"service-ca-operator-777779d784-gt8bb\" (UID: \"da035601-cc1e-4442-9665-1f76bddceb51\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-gt8bb" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.158208 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c57a349c-3bb6-4a77-8a0a-59683f544d6d-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-59jw4\" (UID: \"c57a349c-3bb6-4a77-8a0a-59683f544d6d\") " pod="openshift-marketplace/marketplace-operator-79b997595-59jw4" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.158226 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f29e610f-b8be-455d-8d71-bdc29b177f27-srv-cert\") pod \"catalog-operator-68c6474976-57kt2\" (UID: \"f29e610f-b8be-455d-8d71-bdc29b177f27\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-57kt2" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.158256 4769 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p7qks\" (UniqueName: \"kubernetes.io/projected/a701274a-c195-4938-96dd-9b7b7629ba16-kube-api-access-p7qks\") pod \"ingress-operator-5b745b69d9-nbvrj\" (UID: \"a701274a-c195-4938-96dd-9b7b7629ba16\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nbvrj" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.158276 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/119207e3-9106-4692-b4f1-1729e0c567cb-config\") pod \"authentication-operator-69f744f599-nxr6j\" (UID: \"119207e3-9106-4692-b4f1-1729e0c567cb\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-nxr6j" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.158293 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/119207e3-9106-4692-b4f1-1729e0c567cb-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-nxr6j\" (UID: \"119207e3-9106-4692-b4f1-1729e0c567cb\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-nxr6j" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.158310 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/119207e3-9106-4692-b4f1-1729e0c567cb-serving-cert\") pod \"authentication-operator-69f744f599-nxr6j\" (UID: \"119207e3-9106-4692-b4f1-1729e0c567cb\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-nxr6j" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.158327 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/f218882d-d7ef-4522-aacd-e37b8264eb56-certs\") pod \"machine-config-server-vd8mb\" (UID: \"f218882d-d7ef-4522-aacd-e37b8264eb56\") " pod="openshift-machine-config-operator/machine-config-server-vd8mb" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.158370 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5cqvh\" (UniqueName: \"kubernetes.io/projected/c57a349c-3bb6-4a77-8a0a-59683f544d6d-kube-api-access-5cqvh\") pod \"marketplace-operator-79b997595-59jw4\" (UID: \"c57a349c-3bb6-4a77-8a0a-59683f544d6d\") " pod="openshift-marketplace/marketplace-operator-79b997595-59jw4" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.158391 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-trnx2\" (UniqueName: \"kubernetes.io/projected/680ae683-4028-4d53-a078-c8c3d357f5ff-kube-api-access-trnx2\") pod \"packageserver-d55dfcdfc-6gsx5\" (UID: \"680ae683-4028-4d53-a078-c8c3d357f5ff\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6gsx5" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.158414 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bbctb\" (UniqueName: \"kubernetes.io/projected/da035601-cc1e-4442-9665-1f76bddceb51-kube-api-access-bbctb\") pod \"service-ca-operator-777779d784-gt8bb\" (UID: \"da035601-cc1e-4442-9665-1f76bddceb51\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-gt8bb" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.158435 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: 
\"kubernetes.io/empty-dir/680ae683-4028-4d53-a078-c8c3d357f5ff-tmpfs\") pod \"packageserver-d55dfcdfc-6gsx5\" (UID: \"680ae683-4028-4d53-a078-c8c3d357f5ff\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6gsx5" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.158462 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/da035601-cc1e-4442-9665-1f76bddceb51-serving-cert\") pod \"service-ca-operator-777779d784-gt8bb\" (UID: \"da035601-cc1e-4442-9665-1f76bddceb51\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-gt8bb" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.158484 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vmvmn\" (UniqueName: \"kubernetes.io/projected/8f4fa765-6c0a-455a-8179-001a1b59ca21-kube-api-access-vmvmn\") pod \"dns-operator-744455d44c-gwhw8\" (UID: \"8f4fa765-6c0a-455a-8179-001a1b59ca21\") " pod="openshift-dns-operator/dns-operator-744455d44c-gwhw8" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.158525 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/a7004082-a835-4b0c-8ec7-774e94c2cdc5-socket-dir\") pod \"csi-hostpathplugin-4pc7f\" (UID: \"a7004082-a835-4b0c-8ec7-774e94c2cdc5\") " pod="hostpath-provisioner/csi-hostpathplugin-4pc7f" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.158553 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-499ck\" (UniqueName: \"kubernetes.io/projected/3e62e4ba-8115-4140-b8de-07edd8c6fcfd-kube-api-access-499ck\") pod \"control-plane-machine-set-operator-78cbb6b69f-wx75k\" (UID: \"3e62e4ba-8115-4140-b8de-07edd8c6fcfd\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-wx75k" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.158574 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/680ae683-4028-4d53-a078-c8c3d357f5ff-webhook-cert\") pod \"packageserver-d55dfcdfc-6gsx5\" (UID: \"680ae683-4028-4d53-a078-c8c3d357f5ff\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6gsx5" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.158596 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f510600b-438d-4f61-970a-96d7a27e79c3-config\") pod \"kube-controller-manager-operator-78b949d7b-9jflr\" (UID: \"f510600b-438d-4f61-970a-96d7a27e79c3\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9jflr" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.158612 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/119207e3-9106-4692-b4f1-1729e0c567cb-service-ca-bundle\") pod \"authentication-operator-69f744f599-nxr6j\" (UID: \"119207e3-9106-4692-b4f1-1729e0c567cb\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-nxr6j" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.158629 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8af0e848-a4c4-4006-9ccb-3440c5da7fc8-config-volume\") pod \"dns-default-x87ps\" (UID: \"8af0e848-a4c4-4006-9ccb-3440c5da7fc8\") " 
pod="openshift-dns/dns-default-x87ps" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.158654 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4vkhs\" (UniqueName: \"kubernetes.io/projected/b962d703-4157-4cca-bdbd-107a6fd0d049-kube-api-access-4vkhs\") pod \"migrator-59844c95c7-p7926\" (UID: \"b962d703-4157-4cca-bdbd-107a6fd0d049\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-p7926" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.158670 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a701274a-c195-4938-96dd-9b7b7629ba16-metrics-tls\") pod \"ingress-operator-5b745b69d9-nbvrj\" (UID: \"a701274a-c195-4938-96dd-9b7b7629ba16\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nbvrj" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.158695 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d1bd5479-04b5-4b15-9f75-69b4d3209113-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-4nngf\" (UID: \"d1bd5479-04b5-4b15-9f75-69b4d3209113\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4nngf" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.158711 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f510600b-438d-4f61-970a-96d7a27e79c3-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-9jflr\" (UID: \"f510600b-438d-4f61-970a-96d7a27e79c3\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9jflr" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.158727 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/8af0e848-a4c4-4006-9ccb-3440c5da7fc8-metrics-tls\") pod \"dns-default-x87ps\" (UID: \"8af0e848-a4c4-4006-9ccb-3440c5da7fc8\") " pod="openshift-dns/dns-default-x87ps" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.158742 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/aecc20b3-16e2-4d56-93ec-2c62b4a45e56-secret-volume\") pod \"collect-profiles-29497950-gf6hx\" (UID: \"aecc20b3-16e2-4d56-93ec-2c62b4a45e56\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29497950-gf6hx" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.158760 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/aecc20b3-16e2-4d56-93ec-2c62b4a45e56-config-volume\") pod \"collect-profiles-29497950-gf6hx\" (UID: \"aecc20b3-16e2-4d56-93ec-2c62b4a45e56\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29497950-gf6hx" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.158782 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.158801 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/52ee678e-0f7b-4106-a203-534f9fedf88f-srv-cert\") pod \"olm-operator-6b444d44fb-sn9v6\" (UID: \"52ee678e-0f7b-4106-a203-534f9fedf88f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sn9v6" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.158818 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/5007e4f2-7ccc-4511-95fe-582dee0a5a53-signing-cabundle\") pod \"service-ca-9c57cc56f-b4pjt\" (UID: \"5007e4f2-7ccc-4511-95fe-582dee0a5a53\") " pod="openshift-service-ca/service-ca-9c57cc56f-b4pjt" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.158837 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d1bd5479-04b5-4b15-9f75-69b4d3209113-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-4nngf\" (UID: \"d1bd5479-04b5-4b15-9f75-69b4d3209113\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4nngf" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.158856 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/a7004082-a835-4b0c-8ec7-774e94c2cdc5-mountpoint-dir\") pod \"csi-hostpathplugin-4pc7f\" (UID: \"a7004082-a835-4b0c-8ec7-774e94c2cdc5\") " pod="hostpath-provisioner/csi-hostpathplugin-4pc7f" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.158991 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/a7004082-a835-4b0c-8ec7-774e94c2cdc5-mountpoint-dir\") pod \"csi-hostpathplugin-4pc7f\" (UID: \"a7004082-a835-4b0c-8ec7-774e94c2cdc5\") " pod="hostpath-provisioner/csi-hostpathplugin-4pc7f" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.164933 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-ds894"] Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.165162 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/cbd22ccf-d447-4c9a-80bb-5d71f8725173-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-c5wrc\" (UID: \"cbd22ccf-d447-4c9a-80bb-5d71f8725173\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-c5wrc" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.167864 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c57a349c-3bb6-4a77-8a0a-59683f544d6d-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-59jw4\" (UID: \"c57a349c-3bb6-4a77-8a0a-59683f544d6d\") " pod="openshift-marketplace/marketplace-operator-79b997595-59jw4" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.170103 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/5007e4f2-7ccc-4511-95fe-582dee0a5a53-signing-key\") pod \"service-ca-9c57cc56f-b4pjt\" (UID: \"5007e4f2-7ccc-4511-95fe-582dee0a5a53\") " pod="openshift-service-ca/service-ca-9c57cc56f-b4pjt" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.171445 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: 
\"kubernetes.io/secret/f218882d-d7ef-4522-aacd-e37b8264eb56-node-bootstrap-token\") pod \"machine-config-server-vd8mb\" (UID: \"f218882d-d7ef-4522-aacd-e37b8264eb56\") " pod="openshift-machine-config-operator/machine-config-server-vd8mb" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.173021 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d1bd5479-04b5-4b15-9f75-69b4d3209113-config\") pod \"kube-apiserver-operator-766d6c64bb-4nngf\" (UID: \"d1bd5479-04b5-4b15-9f75-69b4d3209113\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4nngf" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.173231 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/a7004082-a835-4b0c-8ec7-774e94c2cdc5-registration-dir\") pod \"csi-hostpathplugin-4pc7f\" (UID: \"a7004082-a835-4b0c-8ec7-774e94c2cdc5\") " pod="hostpath-provisioner/csi-hostpathplugin-4pc7f" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.174301 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/da035601-cc1e-4442-9665-1f76bddceb51-serving-cert\") pod \"service-ca-operator-777779d784-gt8bb\" (UID: \"da035601-cc1e-4442-9665-1f76bddceb51\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-gt8bb" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.175345 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/680ae683-4028-4d53-a078-c8c3d357f5ff-tmpfs\") pod \"packageserver-d55dfcdfc-6gsx5\" (UID: \"680ae683-4028-4d53-a078-c8c3d357f5ff\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6gsx5" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.177184 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e3acf0c2-2cd0-4415-ab96-b3cf283af01a-cert\") pod \"ingress-canary-9vxrh\" (UID: \"e3acf0c2-2cd0-4415-ab96-b3cf283af01a\") " pod="openshift-ingress-canary/ingress-canary-9vxrh" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.177773 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ae589818-e1eb-471e-ae20-018136cd7868-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-5zm7b\" (UID: \"ae589818-e1eb-471e-ae20-018136cd7868\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5zm7b" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.180008 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/119207e3-9106-4692-b4f1-1729e0c567cb-service-ca-bundle\") pod \"authentication-operator-69f744f599-nxr6j\" (UID: \"119207e3-9106-4692-b4f1-1729e0c567cb\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-nxr6j" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.180119 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/a7004082-a835-4b0c-8ec7-774e94c2cdc5-socket-dir\") pod \"csi-hostpathplugin-4pc7f\" (UID: \"a7004082-a835-4b0c-8ec7-774e94c2cdc5\") " pod="hostpath-provisioner/csi-hostpathplugin-4pc7f" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.180417 4769 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f510600b-438d-4f61-970a-96d7a27e79c3-config\") pod \"kube-controller-manager-operator-78b949d7b-9jflr\" (UID: \"f510600b-438d-4f61-970a-96d7a27e79c3\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9jflr" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.181385 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/119207e3-9106-4692-b4f1-1729e0c567cb-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-nxr6j\" (UID: \"119207e3-9106-4692-b4f1-1729e0c567cb\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-nxr6j" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.181984 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/119207e3-9106-4692-b4f1-1729e0c567cb-config\") pod \"authentication-operator-69f744f599-nxr6j\" (UID: \"119207e3-9106-4692-b4f1-1729e0c567cb\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-nxr6j" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.183012 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/680ae683-4028-4d53-a078-c8c3d357f5ff-webhook-cert\") pod \"packageserver-d55dfcdfc-6gsx5\" (UID: \"680ae683-4028-4d53-a078-c8c3d357f5ff\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6gsx5" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.183108 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/a7004082-a835-4b0c-8ec7-774e94c2cdc5-csi-data-dir\") pod \"csi-hostpathplugin-4pc7f\" (UID: \"a7004082-a835-4b0c-8ec7-774e94c2cdc5\") " pod="hostpath-provisioner/csi-hostpathplugin-4pc7f" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.183880 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a701274a-c195-4938-96dd-9b7b7629ba16-trusted-ca\") pod \"ingress-operator-5b745b69d9-nbvrj\" (UID: \"a701274a-c195-4938-96dd-9b7b7629ba16\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nbvrj" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.184309 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82336871-6c99-4ee1-9fad-2eef02dc232b-config\") pod \"openshift-apiserver-operator-796bbdcf4f-5xmz7\" (UID: \"82336871-6c99-4ee1-9fad-2eef02dc232b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5xmz7" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.184334 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/da035601-cc1e-4442-9665-1f76bddceb51-config\") pod \"service-ca-operator-777779d784-gt8bb\" (UID: \"da035601-cc1e-4442-9665-1f76bddceb51\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-gt8bb" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.184533 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/a7004082-a835-4b0c-8ec7-774e94c2cdc5-plugins-dir\") pod \"csi-hostpathplugin-4pc7f\" (UID: \"a7004082-a835-4b0c-8ec7-774e94c2cdc5\") " 
pod="hostpath-provisioner/csi-hostpathplugin-4pc7f" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.188703 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8af0e848-a4c4-4006-9ccb-3440c5da7fc8-config-volume\") pod \"dns-default-x87ps\" (UID: \"8af0e848-a4c4-4006-9ccb-3440c5da7fc8\") " pod="openshift-dns/dns-default-x87ps" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.190465 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/8f4fa765-6c0a-455a-8179-001a1b59ca21-metrics-tls\") pod \"dns-operator-744455d44c-gwhw8\" (UID: \"8f4fa765-6c0a-455a-8179-001a1b59ca21\") " pod="openshift-dns-operator/dns-operator-744455d44c-gwhw8" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.190849 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/aecc20b3-16e2-4d56-93ec-2c62b4a45e56-config-volume\") pod \"collect-profiles-29497950-gf6hx\" (UID: \"aecc20b3-16e2-4d56-93ec-2c62b4a45e56\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29497950-gf6hx" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.191129 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d8633c7e-4af1-4cf2-ae96-4d91474f25e7-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-w6kkt\" (UID: \"d8633c7e-4af1-4cf2-ae96-4d91474f25e7\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-w6kkt" Jan 31 16:31:34 crc kubenswrapper[4769]: E0131 16:31:34.191269 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-31 16:31:34.691245889 +0000 UTC m=+142.765414778 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vdcnf" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.192327 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/c57a349c-3bb6-4a77-8a0a-59683f544d6d-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-59jw4\" (UID: \"c57a349c-3bb6-4a77-8a0a-59683f544d6d\") " pod="openshift-marketplace/marketplace-operator-79b997595-59jw4" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.192331 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/5007e4f2-7ccc-4511-95fe-582dee0a5a53-signing-cabundle\") pod \"service-ca-9c57cc56f-b4pjt\" (UID: \"5007e4f2-7ccc-4511-95fe-582dee0a5a53\") " pod="openshift-service-ca/service-ca-9c57cc56f-b4pjt" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.192484 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/aecc20b3-16e2-4d56-93ec-2c62b4a45e56-secret-volume\") pod \"collect-profiles-29497950-gf6hx\" (UID: \"aecc20b3-16e2-4d56-93ec-2c62b4a45e56\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29497950-gf6hx" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.228190 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sppfg" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.228740 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/f218882d-d7ef-4522-aacd-e37b8264eb56-certs\") pod \"machine-config-server-vd8mb\" (UID: \"f218882d-d7ef-4522-aacd-e37b8264eb56\") " pod="openshift-machine-config-operator/machine-config-server-vd8mb" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.229472 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/82336871-6c99-4ee1-9fad-2eef02dc232b-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-5xmz7\" (UID: \"82336871-6c99-4ee1-9fad-2eef02dc232b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5xmz7" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.229601 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f29e610f-b8be-455d-8d71-bdc29b177f27-srv-cert\") pod \"catalog-operator-68c6474976-57kt2\" (UID: \"f29e610f-b8be-455d-8d71-bdc29b177f27\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-57kt2" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.229907 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d2fm6\" (UniqueName: \"kubernetes.io/projected/ebf0349c-8283-4951-81be-3b3287372830-kube-api-access-d2fm6\") pod \"cluster-samples-operator-665b6dd947-m58s7\" (UID: \"ebf0349c-8283-4951-81be-3b3287372830\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-m58s7" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.230014 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f29e610f-b8be-455d-8d71-bdc29b177f27-profile-collector-cert\") pod \"catalog-operator-68c6474976-57kt2\" (UID: \"f29e610f-b8be-455d-8d71-bdc29b177f27\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-57kt2" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.230017 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/680ae683-4028-4d53-a078-c8c3d357f5ff-apiservice-cert\") pod \"packageserver-d55dfcdfc-6gsx5\" (UID: \"680ae683-4028-4d53-a078-c8c3d357f5ff\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6gsx5" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.230304 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d1bd5479-04b5-4b15-9f75-69b4d3209113-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-4nngf\" (UID: \"d1bd5479-04b5-4b15-9f75-69b4d3209113\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4nngf" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.230393 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f510600b-438d-4f61-970a-96d7a27e79c3-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-9jflr\" (UID: \"f510600b-438d-4f61-970a-96d7a27e79c3\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9jflr" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.230810 4769 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/52ee678e-0f7b-4106-a203-534f9fedf88f-profile-collector-cert\") pod \"olm-operator-6b444d44fb-sn9v6\" (UID: \"52ee678e-0f7b-4106-a203-534f9fedf88f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sn9v6" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.234037 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-w6kkt" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.238826 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ae589818-e1eb-471e-ae20-018136cd7868-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-5zm7b\" (UID: \"ae589818-e1eb-471e-ae20-018136cd7868\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5zm7b" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.239057 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/8af0e848-a4c4-4006-9ccb-3440c5da7fc8-metrics-tls\") pod \"dns-default-x87ps\" (UID: \"8af0e848-a4c4-4006-9ccb-3440c5da7fc8\") " pod="openshift-dns/dns-default-x87ps" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.239120 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/52ee678e-0f7b-4106-a203-534f9fedf88f-srv-cert\") pod \"olm-operator-6b444d44fb-sn9v6\" (UID: \"52ee678e-0f7b-4106-a203-534f9fedf88f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sn9v6" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.239061 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a701274a-c195-4938-96dd-9b7b7629ba16-metrics-tls\") pod \"ingress-operator-5b745b69d9-nbvrj\" (UID: \"a701274a-c195-4938-96dd-9b7b7629ba16\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nbvrj" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.239417 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/119207e3-9106-4692-b4f1-1729e0c567cb-serving-cert\") pod \"authentication-operator-69f744f599-nxr6j\" (UID: \"119207e3-9106-4692-b4f1-1729e0c567cb\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-nxr6j" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.239526 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-2jt6j"] Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.241023 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/3e62e4ba-8115-4140-b8de-07edd8c6fcfd-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-wx75k\" (UID: \"3e62e4ba-8115-4140-b8de-07edd8c6fcfd\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-wx75k" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.246290 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5sd8s\" (UniqueName: \"kubernetes.io/projected/b04b6715-da98-41a2-a034-3ee53201f83d-kube-api-access-5sd8s\") pod 
\"router-default-5444994796-4lpmw\" (UID: \"b04b6715-da98-41a2-a034-3ee53201f83d\") " pod="openshift-ingress/router-default-5444994796-4lpmw" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.248487 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6q4jz" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.251583 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xxnwg\" (UniqueName: \"kubernetes.io/projected/37adecb9-a5fd-4e61-869b-4a04ac424ac0-kube-api-access-xxnwg\") pod \"oauth-openshift-558db77b4-h8nkx\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.257223 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rdqmx" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.259705 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:31:34 crc kubenswrapper[4769]: E0131 16:31:34.259846 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:31:34.759825276 +0000 UTC m=+142.833993945 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.260060 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:34 crc kubenswrapper[4769]: E0131 16:31:34.260450 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-31 16:31:34.760439692 +0000 UTC m=+142.834608361 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vdcnf" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.261223 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-77rgj\" (UniqueName: \"kubernetes.io/projected/4494904a-b7f5-4141-8a63-3360e03bc528-kube-api-access-77rgj\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.263390 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-4lpmw" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.268631 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/fd3d29b5-b209-40c6-9c46-521268f3c363-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-hhtgl\" (UID: \"fd3d29b5-b209-40c6-9c46-521268f3c363\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hhtgl" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.288848 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ktntw\" (UniqueName: \"kubernetes.io/projected/21023c4f-b727-436c-a0e4-c5798ecf85ae-kube-api-access-ktntw\") pod \"etcd-operator-b45778765-2vgpw\" (UID: \"21023c4f-b727-436c-a0e4-c5798ecf85ae\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2vgpw" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.295863 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-2vgpw" Jan 31 16:31:34 crc kubenswrapper[4769]: W0131 16:31:34.312000 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2e845173_60d6_4bbb_a479_e752d55a4d7d.slice/crio-266d6ce743222d028543004ac72d109114499b390d818c2261c4fbb6795c16e7 WatchSource:0}: Error finding container 266d6ce743222d028543004ac72d109114499b390d818c2261c4fbb6795c16e7: Status 404 returned error can't find the container with id 266d6ce743222d028543004ac72d109114499b390d818c2261c4fbb6795c16e7 Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.312601 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-knvkv\" (UniqueName: \"kubernetes.io/projected/a7004082-a835-4b0c-8ec7-774e94c2cdc5-kube-api-access-knvkv\") pod \"csi-hostpathplugin-4pc7f\" (UID: \"a7004082-a835-4b0c-8ec7-774e94c2cdc5\") " pod="hostpath-provisioner/csi-hostpathplugin-4pc7f" Jan 31 16:31:34 crc kubenswrapper[4769]: W0131 16:31:34.320626 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod82fed0ed_2731_404f_a5f7_1552993ecd71.slice/crio-0af9e8ff6f6b08655e2f1e90bf99a510e1585db124d35981394de8135b6bbb49 WatchSource:0}: Error finding container 0af9e8ff6f6b08655e2f1e90bf99a510e1585db124d35981394de8135b6bbb49: Status 404 returned error can't find the container with id 0af9e8ff6f6b08655e2f1e90bf99a510e1585db124d35981394de8135b6bbb49 Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.332722 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2z9vp\" (UniqueName: \"kubernetes.io/projected/8af0e848-a4c4-4006-9ccb-3440c5da7fc8-kube-api-access-2z9vp\") pod \"dns-default-x87ps\" (UID: \"8af0e848-a4c4-4006-9ccb-3440c5da7fc8\") " pod="openshift-dns/dns-default-x87ps" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.361789 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:31:34 crc kubenswrapper[4769]: E0131 16:31:34.361978 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:31:34.861936149 +0000 UTC m=+142.936104818 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.362256 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:34 crc kubenswrapper[4769]: E0131 16:31:34.362600 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-31 16:31:34.862584647 +0000 UTC m=+142.936753316 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vdcnf" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.363823 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z6n92\" (UniqueName: \"kubernetes.io/projected/aecc20b3-16e2-4d56-93ec-2c62b4a45e56-kube-api-access-z6n92\") pod \"collect-profiles-29497950-gf6hx\" (UID: \"aecc20b3-16e2-4d56-93ec-2c62b4a45e56\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29497950-gf6hx" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.391290 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5cqvh\" (UniqueName: \"kubernetes.io/projected/c57a349c-3bb6-4a77-8a0a-59683f544d6d-kube-api-access-5cqvh\") pod \"marketplace-operator-79b997595-59jw4\" (UID: \"c57a349c-3bb6-4a77-8a0a-59683f544d6d\") " pod="openshift-marketplace/marketplace-operator-79b997595-59jw4" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.394434 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r5g95\" (UniqueName: \"kubernetes.io/projected/5007e4f2-7ccc-4511-95fe-582dee0a5a53-kube-api-access-r5g95\") pod \"service-ca-9c57cc56f-b4pjt\" (UID: \"5007e4f2-7ccc-4511-95fe-582dee0a5a53\") " pod="openshift-service-ca/service-ca-9c57cc56f-b4pjt" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.421294 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a701274a-c195-4938-96dd-9b7b7629ba16-bound-sa-token\") pod \"ingress-operator-5b745b69d9-nbvrj\" (UID: \"a701274a-c195-4938-96dd-9b7b7629ba16\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nbvrj" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.423388 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.433151 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-szfxz\" (UniqueName: \"kubernetes.io/projected/cbd22ccf-d447-4c9a-80bb-5d71f8725173-kube-api-access-szfxz\") pod \"package-server-manager-789f6589d5-c5wrc\" (UID: \"cbd22ccf-d447-4c9a-80bb-5d71f8725173\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-c5wrc" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.443973 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29497950-gf6hx" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.455322 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hhtgl" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.467334 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-59jw4" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.467876 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:31:34 crc kubenswrapper[4769]: E0131 16:31:34.468158 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:31:34.968135416 +0000 UTC m=+143.042304085 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.468310 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.468698 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cm8fm\" (UniqueName: \"kubernetes.io/projected/119207e3-9106-4692-b4f1-1729e0c567cb-kube-api-access-cm8fm\") pod \"authentication-operator-69f744f599-nxr6j\" (UID: \"119207e3-9106-4692-b4f1-1729e0c567cb\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-nxr6j" Jan 31 16:31:34 crc kubenswrapper[4769]: E0131 16:31:34.469084 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2026-01-31 16:31:34.969075942 +0000 UTC m=+143.043244611 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vdcnf" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.479098 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-b4pjt" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.479783 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dlwkt\" (UniqueName: \"kubernetes.io/projected/52ee678e-0f7b-4106-a203-534f9fedf88f-kube-api-access-dlwkt\") pod \"olm-operator-6b444d44fb-sn9v6\" (UID: \"52ee678e-0f7b-4106-a203-534f9fedf88f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sn9v6" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.485083 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-x87ps" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.499319 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-m58s7" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.500186 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fvc4m\" (UniqueName: \"kubernetes.io/projected/f29e610f-b8be-455d-8d71-bdc29b177f27-kube-api-access-fvc4m\") pod \"catalog-operator-68c6474976-57kt2\" (UID: \"f29e610f-b8be-455d-8d71-bdc29b177f27\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-57kt2" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.519924 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-4pc7f" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.524793 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-96jgg\" (UniqueName: \"kubernetes.io/projected/c2af561a-0833-4624-a950-1e1bfb2bacaa-kube-api-access-96jgg\") pod \"downloads-7954f5f757-g2568\" (UID: \"c2af561a-0833-4624-a950-1e1bfb2bacaa\") " pod="openshift-console/downloads-7954f5f757-g2568" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.526611 4769 generic.go:334] "Generic (PLEG): container finished" podID="54adcb76-b278-4991-9999-d6b5f8c8c1d6" containerID="53bd35b4c3a63a0e4ce68ce63bbcfd5ca4134ba9fa68620e7c94aacc6f7ef6c5" exitCode=0 Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.526861 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlgn6" event={"ID":"54adcb76-b278-4991-9999-d6b5f8c8c1d6","Type":"ContainerDied","Data":"53bd35b4c3a63a0e4ce68ce63bbcfd5ca4134ba9fa68620e7c94aacc6f7ef6c5"} Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.534266 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-ds894" event={"ID":"2e845173-60d6-4bbb-a479-e752d55a4d7d","Type":"ContainerStarted","Data":"266d6ce743222d028543004ac72d109114499b390d818c2261c4fbb6795c16e7"} Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.542111 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7d6k6\" (UniqueName: \"kubernetes.io/projected/f218882d-d7ef-4522-aacd-e37b8264eb56-kube-api-access-7d6k6\") pod \"machine-config-server-vd8mb\" (UID: \"f218882d-d7ef-4522-aacd-e37b8264eb56\") " pod="openshift-machine-config-operator/machine-config-server-vd8mb" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.550796 4769 generic.go:334] "Generic (PLEG): container finished" podID="6f0058ec-2d51-4750-ba72-32b848e39402" containerID="1ed85ff4dd108d78cef18e3a1f73a9fb2a79a3fe3b522f3d4355798712dbce8c" exitCode=0 Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.551367 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-pwzwp" event={"ID":"6f0058ec-2d51-4750-ba72-32b848e39402","Type":"ContainerDied","Data":"1ed85ff4dd108d78cef18e3a1f73a9fb2a79a3fe3b522f3d4355798712dbce8c"} Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.551391 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-pwzwp" event={"ID":"6f0058ec-2d51-4750-ba72-32b848e39402","Type":"ContainerStarted","Data":"850b4ff67506d352866a06fc4ffc58cdbb1f059cc3dfbcf68c36c511ccb7a5b2"} Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.561039 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rdqmx"] Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.567212 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-trnx2\" (UniqueName: \"kubernetes.io/projected/680ae683-4028-4d53-a078-c8c3d357f5ff-kube-api-access-trnx2\") pod \"packageserver-d55dfcdfc-6gsx5\" (UID: \"680ae683-4028-4d53-a078-c8c3d357f5ff\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6gsx5" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.567435 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-79bp9" 
event={"ID":"1efa9307-bdd7-4ec9-ab59-32196c343838","Type":"ContainerStarted","Data":"d72cc3684fc894a73491f654528b7571e9c9dc2f848db5abd8345958c8664014"} Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.567482 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-79bp9" event={"ID":"1efa9307-bdd7-4ec9-ab59-32196c343838","Type":"ContainerStarted","Data":"4eb532f556a29102ff49af3d0a160233bcb17c835601fe63f3fb20e807cfd9f1"} Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.567971 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-79bp9" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.569324 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:31:34 crc kubenswrapper[4769]: E0131 16:31:34.570052 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:31:35.070035103 +0000 UTC m=+143.144203773 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.574141 4769 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-79bp9 container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body= Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.574206 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-79bp9" podUID="1efa9307-bdd7-4ec9-ab59-32196c343838" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.579173 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-sppfg"] Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.581843 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-qt6ps" event={"ID":"1ecd290f-188e-4ff0-a52f-6286412a0b5a","Type":"ContainerStarted","Data":"8cd47d2c73bb3afb8df044f7324462642697697a6b1a2f3ce6eab8b5edab53b5"} Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.581875 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-qt6ps" 
event={"ID":"1ecd290f-188e-4ff0-a52f-6286412a0b5a","Type":"ContainerStarted","Data":"f52bd91f5db0e3ef767b0632ed2a0ec8adb669ba48e697a21806a61d68b21387"} Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.581885 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-qt6ps" event={"ID":"1ecd290f-188e-4ff0-a52f-6286412a0b5a","Type":"ContainerStarted","Data":"cc4e3072517de066aca6016d6cb117f0d4534758060e93dbd623806c0fc72953"} Jan 31 16:31:34 crc kubenswrapper[4769]: W0131 16:31:34.589951 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod231d723e_117b_43d1_b664_4e364d7f5d42.slice/crio-acbf6fd6ae6fe9f571e32e0b3fa04ffa1512d00b177a94e827248d7a13fc0400 WatchSource:0}: Error finding container acbf6fd6ae6fe9f571e32e0b3fa04ffa1512d00b177a94e827248d7a13fc0400: Status 404 returned error can't find the container with id acbf6fd6ae6fe9f571e32e0b3fa04ffa1512d00b177a94e827248d7a13fc0400 Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.598633 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-v4t65" event={"ID":"9b644a0b-f7ef-40f5-8855-0264a224470e","Type":"ContainerStarted","Data":"cadba027c1bc03a8d383f5911ac6ea9c46df4bfd10a9d3d292d315626d4f2ac4"} Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.598686 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-v4t65" event={"ID":"9b644a0b-f7ef-40f5-8855-0264a224470e","Type":"ContainerStarted","Data":"135a9fb772ebff0e8df891df3fad2dab6e3b5b90b83dcd910841a1cc9c7837a1"} Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.599639 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-v4t65" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.600350 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-6q4jz"] Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.602018 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-7954f5f757-g2568" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.604245 4769 patch_prober.go:28] interesting pod/console-operator-58897d9998-v4t65 container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.8:8443/readyz\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body= Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.604298 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-v4t65" podUID="9b644a0b-f7ef-40f5-8855-0264a224470e" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.8:8443/readyz\": dial tcp 10.217.0.8:8443: connect: connection refused" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.612693 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8td6v" event={"ID":"f1cfa945-9049-4fe1-bde7-549e1c5d0e39","Type":"ContainerStarted","Data":"16ad5663578e3ee24ce678025d4dcb1d0018d2d5f0b1104969804f920c4183e5"} Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.612738 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8td6v" event={"ID":"f1cfa945-9049-4fe1-bde7-549e1c5d0e39","Type":"ContainerStarted","Data":"788cc8bb7866e5f080995e45c21b9f1a7de64ba610775519633b6ca34de28799"} Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.613175 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8td6v" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.616716 4769 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-8td6v container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body= Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.616764 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8td6v" podUID="f1cfa945-9049-4fe1-bde7-549e1c5d0e39" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.626946 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cvn6z" event={"ID":"0267e024-1c4b-49cd-b1b3-f4afe91bbbfa","Type":"ContainerStarted","Data":"5b534966415b416e5b2254b68065b098371660f92b4dc53e13fc1be374eabba0"} Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.626992 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cvn6z" event={"ID":"0267e024-1c4b-49cd-b1b3-f4afe91bbbfa","Type":"ContainerStarted","Data":"acb9f3b71fd339746f44631d0b424c99b900310136e44541f79a0de3833cdc70"} Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.635619 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-w6kkt"] Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.636758 4769 generic.go:334] "Generic (PLEG): container 
finished" podID="524dcf6e-3c5c-47db-9941-06a21f9a8194" containerID="a7a90d674dd02a69d6565e6443bb8c6ebc4e959d5a39ba22bcc947c38847c36a" exitCode=0 Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.637252 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-v5mpd" event={"ID":"524dcf6e-3c5c-47db-9941-06a21f9a8194","Type":"ContainerDied","Data":"a7a90d674dd02a69d6565e6443bb8c6ebc4e959d5a39ba22bcc947c38847c36a"} Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.637300 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-v5mpd" event={"ID":"524dcf6e-3c5c-47db-9941-06a21f9a8194","Type":"ContainerStarted","Data":"ebd2abf4a0b43f329e67873fabe23cc19532c0e0549abb671b53ed4a81e112fa"} Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.655418 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-4lpmw" event={"ID":"b04b6715-da98-41a2-a034-3ee53201f83d","Type":"ContainerStarted","Data":"e4f7c0245937e4f6e6f69f9f5aa1c78f6c551549f0941ce59efabfdd241aac17"} Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.656044 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-nxr6j" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.658402 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-2jt6j" event={"ID":"82fed0ed-2731-404f-a5f7-1552993ecd71","Type":"ContainerStarted","Data":"0af9e8ff6f6b08655e2f1e90bf99a510e1585db124d35981394de8135b6bbb49"} Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.662245 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-57kt2" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.670975 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:34 crc kubenswrapper[4769]: E0131 16:31:34.674608 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-31 16:31:35.174593865 +0000 UTC m=+143.248762534 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vdcnf" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.694304 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f510600b-438d-4f61-970a-96d7a27e79c3-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-9jflr\" (UID: \"f510600b-438d-4f61-970a-96d7a27e79c3\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9jflr" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.694346 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mx2j4\" (UniqueName: \"kubernetes.io/projected/82336871-6c99-4ee1-9fad-2eef02dc232b-kube-api-access-mx2j4\") pod \"openshift-apiserver-operator-796bbdcf4f-5xmz7\" (UID: \"82336871-6c99-4ee1-9fad-2eef02dc232b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5xmz7" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.694487 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p7qks\" (UniqueName: \"kubernetes.io/projected/a701274a-c195-4938-96dd-9b7b7629ba16-kube-api-access-p7qks\") pod \"ingress-operator-5b745b69d9-nbvrj\" (UID: \"a701274a-c195-4938-96dd-9b7b7629ba16\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nbvrj" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.694362 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vmvmn\" (UniqueName: \"kubernetes.io/projected/8f4fa765-6c0a-455a-8179-001a1b59ca21-kube-api-access-vmvmn\") pod \"dns-operator-744455d44c-gwhw8\" (UID: \"8f4fa765-6c0a-455a-8179-001a1b59ca21\") " pod="openshift-dns-operator/dns-operator-744455d44c-gwhw8" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.694732 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bbctb\" (UniqueName: \"kubernetes.io/projected/da035601-cc1e-4442-9665-1f76bddceb51-kube-api-access-bbctb\") pod \"service-ca-operator-777779d784-gt8bb\" (UID: \"da035601-cc1e-4442-9665-1f76bddceb51\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-gt8bb" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.696551 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-499ck\" (UniqueName: \"kubernetes.io/projected/3e62e4ba-8115-4140-b8de-07edd8c6fcfd-kube-api-access-499ck\") pod \"control-plane-machine-set-operator-78cbb6b69f-wx75k\" (UID: \"3e62e4ba-8115-4140-b8de-07edd8c6fcfd\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-wx75k" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.703035 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4f9wx\" (UniqueName: \"kubernetes.io/projected/ae589818-e1eb-471e-ae20-018136cd7868-kube-api-access-4f9wx\") pod \"openshift-controller-manager-operator-756b6f6bc6-5zm7b\" (UID: \"ae589818-e1eb-471e-ae20-018136cd7868\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5zm7b" Jan 
31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.703996 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-c5wrc" Jan 31 16:31:34 crc kubenswrapper[4769]: W0131 16:31:34.706786 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod32e89aaf_9047_4609_a161_4329731e4b61.slice/crio-b72ff4d2e7156ce9eb14205f10ab8bd88c99d375188eb0529c763ec5ef825106 WatchSource:0}: Error finding container b72ff4d2e7156ce9eb14205f10ab8bd88c99d375188eb0529c763ec5ef825106: Status 404 returned error can't find the container with id b72ff4d2e7156ce9eb14205f10ab8bd88c99d375188eb0529c763ec5ef825106 Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.712345 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4vkhs\" (UniqueName: \"kubernetes.io/projected/b962d703-4157-4cca-bdbd-107a6fd0d049-kube-api-access-4vkhs\") pod \"migrator-59844c95c7-p7926\" (UID: \"b962d703-4157-4cca-bdbd-107a6fd0d049\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-p7926" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.719120 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-gt8bb" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.736608 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6gsx5" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.737442 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d1bd5479-04b5-4b15-9f75-69b4d3209113-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-4nngf\" (UID: \"d1bd5479-04b5-4b15-9f75-69b4d3209113\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4nngf" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.754183 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sn9v6" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.754325 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-79tkh\" (UniqueName: \"kubernetes.io/projected/e3acf0c2-2cd0-4415-ab96-b3cf283af01a-kube-api-access-79tkh\") pod \"ingress-canary-9vxrh\" (UID: \"e3acf0c2-2cd0-4415-ab96-b3cf283af01a\") " pod="openshift-ingress-canary/ingress-canary-9vxrh" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.772018 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:31:34 crc kubenswrapper[4769]: E0131 16:31:34.772337 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:31:35.272307287 +0000 UTC m=+143.346475956 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.772741 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:34 crc kubenswrapper[4769]: E0131 16:31:34.773645 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-31 16:31:35.273628884 +0000 UTC m=+143.347797553 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vdcnf" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.825983 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-9vxrh" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.837184 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-vd8mb" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.873489 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:31:34 crc kubenswrapper[4769]: E0131 16:31:34.873840 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:31:35.373773143 +0000 UTC m=+143.447941842 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.874130 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:34 crc kubenswrapper[4769]: E0131 16:31:34.879180 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-31 16:31:35.379155852 +0000 UTC m=+143.453324521 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vdcnf" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.893459 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-2vgpw"] Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.910321 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nbvrj" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.918885 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5zm7b" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.927409 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4nngf" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.949726 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9jflr" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.950446 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5xmz7" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.956771 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-p7926" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.976371 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:31:34 crc kubenswrapper[4769]: E0131 16:31:34.978760 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:31:35.478744176 +0000 UTC m=+143.552912845 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.979330 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-wx75k" Jan 31 16:31:34 crc kubenswrapper[4769]: I0131 16:31:34.990181 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-gwhw8" Jan 31 16:31:35 crc kubenswrapper[4769]: I0131 16:31:35.106625 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:35 crc kubenswrapper[4769]: E0131 16:31:35.107665 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-31 16:31:35.60763895 +0000 UTC m=+143.681807619 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vdcnf" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:35 crc kubenswrapper[4769]: I0131 16:31:35.122152 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-b4pjt"] Jan 31 16:31:35 crc kubenswrapper[4769]: I0131 16:31:35.137604 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hhtgl"] Jan 31 16:31:35 crc kubenswrapper[4769]: I0131 16:31:35.140778 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-x87ps"] Jan 31 16:31:35 crc kubenswrapper[4769]: I0131 16:31:35.208264 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:31:35 crc kubenswrapper[4769]: E0131 16:31:35.208768 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:31:35.708749267 +0000 UTC m=+143.782917936 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:35 crc kubenswrapper[4769]: I0131 16:31:35.254591 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-4lpmw" podStartSLOduration=121.254559403 podStartE2EDuration="2m1.254559403s" podCreationTimestamp="2026-01-31 16:29:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:35.250617315 +0000 UTC m=+143.324785984" watchObservedRunningTime="2026-01-31 16:31:35.254559403 +0000 UTC m=+143.328728082" Jan 31 16:31:35 crc kubenswrapper[4769]: I0131 16:31:35.264133 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-4lpmw" Jan 31 16:31:35 crc kubenswrapper[4769]: I0131 16:31:35.273091 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29497950-gf6hx"] Jan 31 16:31:35 crc kubenswrapper[4769]: I0131 16:31:35.284488 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-h8nkx"] Jan 31 16:31:35 crc kubenswrapper[4769]: I0131 16:31:35.287974 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-marketplace/marketplace-operator-79b997595-59jw4"] Jan 31 16:31:35 crc kubenswrapper[4769]: I0131 16:31:35.317248 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:35 crc kubenswrapper[4769]: E0131 16:31:35.317587 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-31 16:31:35.817572725 +0000 UTC m=+143.891741394 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vdcnf" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:35 crc kubenswrapper[4769]: I0131 16:31:35.355991 4769 patch_prober.go:28] interesting pod/router-default-5444994796-4lpmw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 31 16:31:35 crc kubenswrapper[4769]: [-]has-synced failed: reason withheld Jan 31 16:31:35 crc kubenswrapper[4769]: [+]process-running ok Jan 31 16:31:35 crc kubenswrapper[4769]: healthz check failed Jan 31 16:31:35 crc kubenswrapper[4769]: I0131 16:31:35.356046 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4lpmw" podUID="b04b6715-da98-41a2-a034-3ee53201f83d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 31 16:31:35 crc kubenswrapper[4769]: I0131 16:31:35.394139 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-v4t65" podStartSLOduration=122.394094582 podStartE2EDuration="2m2.394094582s" podCreationTimestamp="2026-01-31 16:29:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:35.390326957 +0000 UTC m=+143.464495636" watchObservedRunningTime="2026-01-31 16:31:35.394094582 +0000 UTC m=+143.468263241" Jan 31 16:31:35 crc kubenswrapper[4769]: I0131 16:31:35.423996 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:31:35 crc kubenswrapper[4769]: E0131 16:31:35.424429 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:31:35.92441279 +0000 UTC m=+143.998581459 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:35 crc kubenswrapper[4769]: I0131 16:31:35.525055 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:35 crc kubenswrapper[4769]: E0131 16:31:35.525776 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-31 16:31:36.025764113 +0000 UTC m=+144.099932782 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vdcnf" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:35 crc kubenswrapper[4769]: I0131 16:31:35.629307 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:31:35 crc kubenswrapper[4769]: E0131 16:31:35.629582 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:31:36.129551823 +0000 UTC m=+144.203720482 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:35 crc kubenswrapper[4769]: I0131 16:31:35.629688 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:35 crc kubenswrapper[4769]: E0131 16:31:35.630092 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-31 16:31:36.130076438 +0000 UTC m=+144.204245107 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vdcnf" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:35 crc kubenswrapper[4769]: I0131 16:31:35.715780 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-4pc7f"] Jan 31 16:31:35 crc kubenswrapper[4769]: I0131 16:31:35.719535 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-g2568"] Jan 31 16:31:35 crc kubenswrapper[4769]: I0131 16:31:35.719811 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-m58s7"] Jan 31 16:31:35 crc kubenswrapper[4769]: I0131 16:31:35.722703 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" event={"ID":"37adecb9-a5fd-4e61-869b-4a04ac424ac0","Type":"ContainerStarted","Data":"344e7e435ea686f5b6aa3c4ca6f18338dac606366f5a0b0a563cd0442f2361ab"} Jan 31 16:31:35 crc kubenswrapper[4769]: I0131 16:31:35.726439 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-pwzwp" event={"ID":"6f0058ec-2d51-4750-ba72-32b848e39402","Type":"ContainerStarted","Data":"7df82213274e6b645331fecfc13bb494cba1d64bacbff21720ba599132d502c4"} Jan 31 16:31:35 crc kubenswrapper[4769]: I0131 16:31:35.737219 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:31:35 crc kubenswrapper[4769]: E0131 16:31:35.737592 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 
podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:31:36.237564999 +0000 UTC m=+144.311733668 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:35 crc kubenswrapper[4769]: I0131 16:31:35.743749 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hhtgl" event={"ID":"fd3d29b5-b209-40c6-9c46-521268f3c363","Type":"ContainerStarted","Data":"b6f53a8387ce493b5b0f383de2c362191f283d3d7c9ef2fb78e24e98ba3e4db7"} Jan 31 16:31:35 crc kubenswrapper[4769]: I0131 16:31:35.745779 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-4lpmw" event={"ID":"b04b6715-da98-41a2-a034-3ee53201f83d","Type":"ContainerStarted","Data":"ba748a15e77890654deae5cdfa0fdf4469ffe41ccf6ac9139f5ba41475501d6a"} Jan 31 16:31:35 crc kubenswrapper[4769]: I0131 16:31:35.750287 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-2vgpw" event={"ID":"21023c4f-b727-436c-a0e4-c5798ecf85ae","Type":"ContainerStarted","Data":"21f7fc5a48be21cd7e91c02e46d7a359060b15e8875bc663d1a3fc8724078bcf"} Jan 31 16:31:35 crc kubenswrapper[4769]: I0131 16:31:35.781274 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-2jt6j" event={"ID":"82fed0ed-2731-404f-a5f7-1552993ecd71","Type":"ContainerStarted","Data":"2515acbc397b26a47c9a24a005b235b1af0a94893b4f48d530d09f2414d8fe87"} Jan 31 16:31:35 crc kubenswrapper[4769]: I0131 16:31:35.789934 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6q4jz" event={"ID":"23df6b36-16f8-4198-acc3-6130a4ea9ca8","Type":"ContainerStarted","Data":"e3cf86050cb3fc00c660753bcf509755f04cdf0215b30a7bdbb63ed66b08cea4"} Jan 31 16:31:35 crc kubenswrapper[4769]: I0131 16:31:35.790125 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6q4jz" event={"ID":"23df6b36-16f8-4198-acc3-6130a4ea9ca8","Type":"ContainerStarted","Data":"b1aa291892f07e7126fa39c83da227b6731badd7952b195cb8ca44e53e95d3d1"} Jan 31 16:31:35 crc kubenswrapper[4769]: I0131 16:31:35.799154 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-x87ps" event={"ID":"8af0e848-a4c4-4006-9ccb-3440c5da7fc8","Type":"ContainerStarted","Data":"23c4510bc3a571b058efda2c9d96930fdc4a71e1b6b90ae29767f122ef8f93ad"} Jan 31 16:31:35 crc kubenswrapper[4769]: I0131 16:31:35.806932 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-ds894" event={"ID":"2e845173-60d6-4bbb-a479-e752d55a4d7d","Type":"ContainerStarted","Data":"1831a50ef5973584be40db437b098510d97037ce5bf6307f36b0e1f0cf7d42ce"} Jan 31 16:31:35 crc kubenswrapper[4769]: I0131 16:31:35.811544 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-qt6ps" podStartSLOduration=121.811522805 
podStartE2EDuration="2m1.811522805s" podCreationTimestamp="2026-01-31 16:29:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:35.768848295 +0000 UTC m=+143.843016964" watchObservedRunningTime="2026-01-31 16:31:35.811522805 +0000 UTC m=+143.885691474" Jan 31 16:31:35 crc kubenswrapper[4769]: I0131 16:31:35.818185 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rdqmx" event={"ID":"231d723e-117b-43d1-b664-4e364d7f5d42","Type":"ContainerStarted","Data":"acbf6fd6ae6fe9f571e32e0b3fa04ffa1512d00b177a94e827248d7a13fc0400"} Jan 31 16:31:35 crc kubenswrapper[4769]: I0131 16:31:35.820605 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29497950-gf6hx" event={"ID":"aecc20b3-16e2-4d56-93ec-2c62b4a45e56","Type":"ContainerStarted","Data":"f5ba5eda851bf4ef32c3aeebe4a99bcbfd8f8000d4d440bb014f9af0657ce902"} Jan 31 16:31:35 crc kubenswrapper[4769]: I0131 16:31:35.821365 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-b4pjt" event={"ID":"5007e4f2-7ccc-4511-95fe-582dee0a5a53","Type":"ContainerStarted","Data":"f9a319962cd62a2452e5775242ac0eca23b611acb638693e259ae5d9755a2955"} Jan 31 16:31:35 crc kubenswrapper[4769]: I0131 16:31:35.824166 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-vd8mb" event={"ID":"f218882d-d7ef-4522-aacd-e37b8264eb56","Type":"ContainerStarted","Data":"1e849cc5561488af0743aa6ffead82f6f714365c947d9c3b30e3d8386fc5ece3"} Jan 31 16:31:35 crc kubenswrapper[4769]: I0131 16:31:35.831444 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sppfg" event={"ID":"32e89aaf-9047-4609-a161-4329731e4b61","Type":"ContainerStarted","Data":"c745c6a06ab69a7fbb7601a87426477c28452199f72fc5b0e6e5c3ecb1db7b95"} Jan 31 16:31:35 crc kubenswrapper[4769]: I0131 16:31:35.831471 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sppfg" event={"ID":"32e89aaf-9047-4609-a161-4329731e4b61","Type":"ContainerStarted","Data":"b72ff4d2e7156ce9eb14205f10ab8bd88c99d375188eb0529c763ec5ef825106"} Jan 31 16:31:35 crc kubenswrapper[4769]: I0131 16:31:35.838216 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:35 crc kubenswrapper[4769]: E0131 16:31:35.839745 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-31 16:31:36.339732225 +0000 UTC m=+144.413900984 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vdcnf" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:35 crc kubenswrapper[4769]: I0131 16:31:35.860799 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-59jw4" event={"ID":"c57a349c-3bb6-4a77-8a0a-59683f544d6d","Type":"ContainerStarted","Data":"3bf21dbf0e92471ae1c18867e9ee367a366a8fd06409e255c439ce3631d260cd"} Jan 31 16:31:35 crc kubenswrapper[4769]: I0131 16:31:35.872863 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-c5wrc"] Jan 31 16:31:35 crc kubenswrapper[4769]: I0131 16:31:35.960740 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:31:35 crc kubenswrapper[4769]: E0131 16:31:35.962133 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:31:36.46211715 +0000 UTC m=+144.536285819 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:35 crc kubenswrapper[4769]: I0131 16:31:35.989059 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cvn6z" event={"ID":"0267e024-1c4b-49cd-b1b3-f4afe91bbbfa","Type":"ContainerStarted","Data":"baec6d5d56be9ae99ea56199fe83023063c809fb24fe6c6f9e9972a1996fa27f"} Jan 31 16:31:36 crc kubenswrapper[4769]: I0131 16:31:36.007092 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6gsx5"] Jan 31 16:31:36 crc kubenswrapper[4769]: I0131 16:31:36.018802 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-v5mpd" event={"ID":"524dcf6e-3c5c-47db-9941-06a21f9a8194","Type":"ContainerStarted","Data":"53522b0ea9a9a3f7b45267cb796a40697f539c4b85b9dfe08dc3253c89707384"} Jan 31 16:31:36 crc kubenswrapper[4769]: I0131 16:31:36.019399 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-v5mpd" Jan 31 16:31:36 crc kubenswrapper[4769]: I0131 16:31:36.050479 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sn9v6"] Jan 31 16:31:36 crc kubenswrapper[4769]: I0131 16:31:36.057286 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-57kt2"] Jan 31 16:31:36 crc kubenswrapper[4769]: I0131 16:31:36.066066 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:36 crc kubenswrapper[4769]: E0131 16:31:36.069677 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-31 16:31:36.569659814 +0000 UTC m=+144.643828483 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vdcnf" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:36 crc kubenswrapper[4769]: I0131 16:31:36.097187 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-nxr6j"] Jan 31 16:31:36 crc kubenswrapper[4769]: I0131 16:31:36.102435 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-w6kkt" event={"ID":"d8633c7e-4af1-4cf2-ae96-4d91474f25e7","Type":"ContainerStarted","Data":"0525c8bccd0320148271b8affc0204fe2615621637b028c42e2dad411e7c3a3e"} Jan 31 16:31:36 crc kubenswrapper[4769]: I0131 16:31:36.128174 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-v4t65" Jan 31 16:31:36 crc kubenswrapper[4769]: I0131 16:31:36.153455 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8td6v" Jan 31 16:31:36 crc kubenswrapper[4769]: I0131 16:31:36.168285 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-79bp9" Jan 31 16:31:36 crc kubenswrapper[4769]: I0131 16:31:36.168886 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:31:36 crc kubenswrapper[4769]: E0131 16:31:36.171158 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:31:36.67114468 +0000 UTC m=+144.745313349 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:36 crc kubenswrapper[4769]: I0131 16:31:36.268952 4769 patch_prober.go:28] interesting pod/router-default-5444994796-4lpmw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 31 16:31:36 crc kubenswrapper[4769]: [-]has-synced failed: reason withheld Jan 31 16:31:36 crc kubenswrapper[4769]: [+]process-running ok Jan 31 16:31:36 crc kubenswrapper[4769]: healthz check failed Jan 31 16:31:36 crc kubenswrapper[4769]: I0131 16:31:36.269027 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4lpmw" podUID="b04b6715-da98-41a2-a034-3ee53201f83d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 31 16:31:36 crc kubenswrapper[4769]: I0131 16:31:36.269761 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8td6v" podStartSLOduration=122.269708685 podStartE2EDuration="2m2.269708685s" podCreationTimestamp="2026-01-31 16:29:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:36.261040705 +0000 UTC m=+144.335209384" watchObservedRunningTime="2026-01-31 16:31:36.269708685 +0000 UTC m=+144.343877354" Jan 31 16:31:36 crc kubenswrapper[4769]: I0131 16:31:36.273634 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:36 crc kubenswrapper[4769]: W0131 16:31:36.275327 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod680ae683_4028_4d53_a078_c8c3d357f5ff.slice/crio-d803d1ec21af963ee901628dec74d1902f7b30fafb775017540837804bb964fc WatchSource:0}: Error finding container d803d1ec21af963ee901628dec74d1902f7b30fafb775017540837804bb964fc: Status 404 returned error can't find the container with id d803d1ec21af963ee901628dec74d1902f7b30fafb775017540837804bb964fc Jan 31 16:31:36 crc kubenswrapper[4769]: E0131 16:31:36.276471 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-31 16:31:36.776441681 +0000 UTC m=+144.850610350 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vdcnf" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:36 crc kubenswrapper[4769]: I0131 16:31:36.350152 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5xmz7"] Jan 31 16:31:36 crc kubenswrapper[4769]: I0131 16:31:36.360395 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-9vxrh"] Jan 31 16:31:36 crc kubenswrapper[4769]: I0131 16:31:36.382575 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4nngf"] Jan 31 16:31:36 crc kubenswrapper[4769]: I0131 16:31:36.384818 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:31:36 crc kubenswrapper[4769]: E0131 16:31:36.385326 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:31:36.885296582 +0000 UTC m=+144.959465251 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:36 crc kubenswrapper[4769]: I0131 16:31:36.387906 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:36 crc kubenswrapper[4769]: E0131 16:31:36.388426 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-31 16:31:36.888408727 +0000 UTC m=+144.962577396 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vdcnf" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:36 crc kubenswrapper[4769]: I0131 16:31:36.424762 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-nbvrj"] Jan 31 16:31:36 crc kubenswrapper[4769]: I0131 16:31:36.464897 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-79bp9" podStartSLOduration=122.464840861 podStartE2EDuration="2m2.464840861s" podCreationTimestamp="2026-01-31 16:29:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:36.442794282 +0000 UTC m=+144.516962951" watchObservedRunningTime="2026-01-31 16:31:36.464840861 +0000 UTC m=+144.539009520" Jan 31 16:31:36 crc kubenswrapper[4769]: I0131 16:31:36.480768 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-wx75k"] Jan 31 16:31:36 crc kubenswrapper[4769]: I0131 16:31:36.493083 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:31:36 crc kubenswrapper[4769]: E0131 16:31:36.493431 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:31:36.993412751 +0000 UTC m=+145.067581420 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:36 crc kubenswrapper[4769]: I0131 16:31:36.538659 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cvn6z" podStartSLOduration=123.538634771 podStartE2EDuration="2m3.538634771s" podCreationTimestamp="2026-01-31 16:29:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:36.508709234 +0000 UTC m=+144.582877903" watchObservedRunningTime="2026-01-31 16:31:36.538634771 +0000 UTC m=+144.612803440" Jan 31 16:31:36 crc kubenswrapper[4769]: I0131 16:31:36.555093 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-p7926"] Jan 31 16:31:36 crc kubenswrapper[4769]: I0131 16:31:36.601539 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:36 crc kubenswrapper[4769]: E0131 16:31:36.602391 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-31 16:31:37.102378525 +0000 UTC m=+145.176547194 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vdcnf" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:36 crc kubenswrapper[4769]: I0131 16:31:36.602422 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-gt8bb"] Jan 31 16:31:36 crc kubenswrapper[4769]: I0131 16:31:36.649524 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5zm7b"] Jan 31 16:31:36 crc kubenswrapper[4769]: I0131 16:31:36.692960 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-gwhw8"] Jan 31 16:31:36 crc kubenswrapper[4769]: I0131 16:31:36.707806 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:31:36 crc kubenswrapper[4769]: E0131 16:31:36.708235 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:31:37.208215712 +0000 UTC m=+145.282384381 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:36 crc kubenswrapper[4769]: I0131 16:31:36.708275 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rdqmx" podStartSLOduration=122.708255223 podStartE2EDuration="2m2.708255223s" podCreationTimestamp="2026-01-31 16:29:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:36.707162252 +0000 UTC m=+144.781330921" watchObservedRunningTime="2026-01-31 16:31:36.708255223 +0000 UTC m=+144.782423882" Jan 31 16:31:36 crc kubenswrapper[4769]: I0131 16:31:36.762488 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlgn6" podStartSLOduration=122.762464061 podStartE2EDuration="2m2.762464061s" podCreationTimestamp="2026-01-31 16:29:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:36.745763909 +0000 UTC m=+144.819932578" watchObservedRunningTime="2026-01-31 16:31:36.762464061 +0000 UTC m=+144.836632730" Jan 31 16:31:36 crc kubenswrapper[4769]: I0131 16:31:36.763917 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9jflr"] Jan 31 16:31:36 crc kubenswrapper[4769]: I0131 16:31:36.812260 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:36 crc kubenswrapper[4769]: E0131 16:31:36.812625 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-31 16:31:37.312612338 +0000 UTC m=+145.386780997 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vdcnf" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:36 crc kubenswrapper[4769]: I0131 16:31:36.814134 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-ds894" podStartSLOduration=123.81411805 podStartE2EDuration="2m3.81411805s" podCreationTimestamp="2026-01-31 16:29:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:36.804627497 +0000 UTC m=+144.878796166" watchObservedRunningTime="2026-01-31 16:31:36.81411805 +0000 UTC m=+144.888286719" Jan 31 16:31:36 crc kubenswrapper[4769]: I0131 16:31:36.918776 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:31:36 crc kubenswrapper[4769]: E0131 16:31:36.925969 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:31:37.425932512 +0000 UTC m=+145.500101181 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:36 crc kubenswrapper[4769]: I0131 16:31:36.926891 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-v5mpd" podStartSLOduration=123.926864258 podStartE2EDuration="2m3.926864258s" podCreationTimestamp="2026-01-31 16:29:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:36.90052761 +0000 UTC m=+144.974696289" watchObservedRunningTime="2026-01-31 16:31:36.926864258 +0000 UTC m=+145.001032927" Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.029367 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:37 crc kubenswrapper[4769]: E0131 16:31:37.029819 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-31 16:31:37.529802824 +0000 UTC m=+145.603971493 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vdcnf" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.133442 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:31:37 crc kubenswrapper[4769]: E0131 16:31:37.134130 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:31:37.634100179 +0000 UTC m=+145.708268838 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.134575 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:37 crc kubenswrapper[4769]: E0131 16:31:37.134994 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-31 16:31:37.634977352 +0000 UTC m=+145.709146021 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vdcnf" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.156997 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-x87ps" event={"ID":"8af0e848-a4c4-4006-9ccb-3440c5da7fc8","Type":"ContainerStarted","Data":"c22bd8b9ffe95ecc23d7a61b095913841a832f74b1f8f09dbd9ec7c57b305a1d"} Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.175801 4769 csr.go:261] certificate signing request csr-mfc4b is approved, waiting to be issued Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.186144 4769 csr.go:257] certificate signing request csr-mfc4b is issued Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.203759 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5xmz7" event={"ID":"82336871-6c99-4ee1-9fad-2eef02dc232b","Type":"ContainerStarted","Data":"aa7ae2de3f76f7100e2392c53f5dcfe90aceb56cc12d8f6ed750d223af93eee3"} Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.213970 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sn9v6" event={"ID":"52ee678e-0f7b-4106-a203-534f9fedf88f","Type":"ContainerStarted","Data":"f4a18c929f90e1ef1a0b5504a3c01ee11a3efc753adc64036ef59a8dfb9505ad"} Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.231447 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hhtgl" event={"ID":"fd3d29b5-b209-40c6-9c46-521268f3c363","Type":"ContainerStarted","Data":"6d50c5e1c6f4a3342f20f6c64d187db7b6cecf7002aa1db16be87faa1e2351a1"} Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.239230 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:31:37 crc kubenswrapper[4769]: E0131 16:31:37.239653 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:31:37.739632857 +0000 UTC m=+145.813801526 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.241370 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-57kt2" event={"ID":"f29e610f-b8be-455d-8d71-bdc29b177f27","Type":"ContainerStarted","Data":"ded5c162d1acb245d0a9cc93bf588bcadc4dd51525f461d7750e8d3b3b01ba30"} Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.244337 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6q4jz" event={"ID":"23df6b36-16f8-4198-acc3-6130a4ea9ca8","Type":"ContainerStarted","Data":"e3da46a3805cd3f777190b8a0450d0a837e9bc7d0be39b01243869cf2876948b"} Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.254814 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-nxr6j" event={"ID":"119207e3-9106-4692-b4f1-1729e0c567cb","Type":"ContainerStarted","Data":"ce33367e114f06f76a5399a9851a5ef0e2fb50d4ceeefa6ab09ddd1e503725e4"} Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.254828 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hhtgl" podStartSLOduration=123.254806526 podStartE2EDuration="2m3.254806526s" podCreationTimestamp="2026-01-31 16:29:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:37.253969073 +0000 UTC m=+145.328137742" watchObservedRunningTime="2026-01-31 16:31:37.254806526 +0000 UTC m=+145.328975195" Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.268778 4769 patch_prober.go:28] interesting pod/router-default-5444994796-4lpmw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 31 16:31:37 crc kubenswrapper[4769]: [-]has-synced failed: reason withheld Jan 31 16:31:37 crc kubenswrapper[4769]: [+]process-running ok Jan 31 16:31:37 crc kubenswrapper[4769]: healthz check failed Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.268853 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4lpmw" podUID="b04b6715-da98-41a2-a034-3ee53201f83d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 31 16:31:37 crc 
kubenswrapper[4769]: I0131 16:31:37.279020 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29497950-gf6hx" event={"ID":"aecc20b3-16e2-4d56-93ec-2c62b4a45e56","Type":"ContainerStarted","Data":"76943742668c5c20bee77cf85f46bc951d40d6b1d5ecd9c3720b69e068175428"} Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.279900 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-6q4jz" podStartSLOduration=123.27987766 podStartE2EDuration="2m3.27987766s" podCreationTimestamp="2026-01-31 16:29:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:37.276668851 +0000 UTC m=+145.350837520" watchObservedRunningTime="2026-01-31 16:31:37.27987766 +0000 UTC m=+145.354046329" Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.289859 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rdqmx" event={"ID":"231d723e-117b-43d1-b664-4e364d7f5d42","Type":"ContainerStarted","Data":"f73daf646052c95d61a2a819606a3792f62abedcb63d593a1dfdede8b86d18fc"} Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.310808 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-w6kkt" event={"ID":"d8633c7e-4af1-4cf2-ae96-4d91474f25e7","Type":"ContainerStarted","Data":"8aa953e54cda66927c6b40283cdf5bbf972199deea75bda22936b41df430eb76"} Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.327254 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-9vxrh" event={"ID":"e3acf0c2-2cd0-4415-ab96-b3cf283af01a","Type":"ContainerStarted","Data":"e922e57bb1c5c1c20f5f906a8380df762cadb70701b143feffa4d59efcc2db3d"} Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.339268 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlgn6" event={"ID":"54adcb76-b278-4991-9999-d6b5f8c8c1d6","Type":"ContainerStarted","Data":"12531e58ee84fe0c6961d4ce052f22d068fab07579f730b296d01e2d14b60012"} Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.341127 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.348007 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29497950-gf6hx" podStartSLOduration=97.347986023 podStartE2EDuration="1m37.347986023s" podCreationTimestamp="2026-01-31 16:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:37.326128338 +0000 UTC m=+145.400297007" watchObservedRunningTime="2026-01-31 16:31:37.347986023 +0000 UTC m=+145.422154682" Jan 31 16:31:37 crc kubenswrapper[4769]: E0131 16:31:37.348725 4769 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-31 16:31:37.848703412 +0000 UTC m=+145.922872081 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vdcnf" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.350707 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-wx75k" event={"ID":"3e62e4ba-8115-4140-b8de-07edd8c6fcfd","Type":"ContainerStarted","Data":"e4cfa8663e4b46eab83ad53fc2a5084a786f6066c080100d964770f5903cb8ea"} Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.359382 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4nngf" event={"ID":"d1bd5479-04b5-4b15-9f75-69b4d3209113","Type":"ContainerStarted","Data":"c3a963a097e00a8c50ec8057d0178fdef47e749e411a4cb3c2298bdc442bf696"} Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.369772 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nbvrj" event={"ID":"a701274a-c195-4938-96dd-9b7b7629ba16","Type":"ContainerStarted","Data":"44866a793a6bef0bb4acf253fb07c3a008b0019cddac5fd3a9ba59faa7895800"} Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.439827 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-2vgpw" event={"ID":"21023c4f-b727-436c-a0e4-c5798ecf85ae","Type":"ContainerStarted","Data":"7cb8b4b421e1fb25aab0f5205c8fb55e211b2eb24dfd1421b66a6a8c086c69ab"} Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.441931 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:31:37 crc kubenswrapper[4769]: E0131 16:31:37.442192 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:31:37.942152937 +0000 UTC m=+146.016321606 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.442390 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:37 crc kubenswrapper[4769]: E0131 16:31:37.444099 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-31 16:31:37.94408036 +0000 UTC m=+146.018249019 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vdcnf" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.464054 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6gsx5" event={"ID":"680ae683-4028-4d53-a078-c8c3d357f5ff","Type":"ContainerStarted","Data":"d803d1ec21af963ee901628dec74d1902f7b30fafb775017540837804bb964fc"} Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.471522 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-w6kkt" podStartSLOduration=123.471502789 podStartE2EDuration="2m3.471502789s" podCreationTimestamp="2026-01-31 16:29:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:37.349440063 +0000 UTC m=+145.423608722" watchObservedRunningTime="2026-01-31 16:31:37.471502789 +0000 UTC m=+145.545671468" Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.478303 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-vd8mb" event={"ID":"f218882d-d7ef-4522-aacd-e37b8264eb56","Type":"ContainerStarted","Data":"5f93f0465aaa26dda93b2f453cbf96ed36ee2febc61282309e6d4717d2e5efae"} Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.482880 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-2vgpw" podStartSLOduration=123.482859913 podStartE2EDuration="2m3.482859913s" podCreationTimestamp="2026-01-31 16:29:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:37.470397738 +0000 UTC m=+145.544566407" 
watchObservedRunningTime="2026-01-31 16:31:37.482859913 +0000 UTC m=+145.557028582" Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.520205 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-gwhw8" event={"ID":"8f4fa765-6c0a-455a-8179-001a1b59ca21","Type":"ContainerStarted","Data":"c47d0f1c47d2a46eb32d5b2ad2e8349602e5c61a27993d5c5c97817c5055f67c"} Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.521872 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9jflr" event={"ID":"f510600b-438d-4f61-970a-96d7a27e79c3","Type":"ContainerStarted","Data":"dcc1080db4eb1ece257be47b5d9ec67618776cee651023fdcfec2e0ac113ce65"} Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.522764 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-gt8bb" event={"ID":"da035601-cc1e-4442-9665-1f76bddceb51","Type":"ContainerStarted","Data":"439037db357ca532e973713db7f497805d30d1d9eeb82135ab560d6e3fbc308a"} Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.523939 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-p7926" event={"ID":"b962d703-4157-4cca-bdbd-107a6fd0d049","Type":"ContainerStarted","Data":"b544bd1ac1da69d454a33ab5b717863602b9abd44323294a9d23a08751793a46"} Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.547734 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:31:37 crc kubenswrapper[4769]: E0131 16:31:37.550620 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:31:38.050597326 +0000 UTC m=+146.124765995 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.559099 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-59jw4" event={"ID":"c57a349c-3bb6-4a77-8a0a-59683f544d6d","Type":"ContainerStarted","Data":"3ddacbe469aee19591bea55f769bcd54c2cdadb6c1feeab32fa1ce8320a01724"} Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.559530 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-59jw4" Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.569705 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" event={"ID":"37adecb9-a5fd-4e61-869b-4a04ac424ac0","Type":"ContainerStarted","Data":"aa088c60e088a9f8c3a9899946bf609f1c5250a4c20484ebea483b036ac25038"} Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.570675 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.581322 4769 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-h8nkx container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.12:6443/healthz\": dial tcp 10.217.0.12:6443: connect: connection refused" start-of-body= Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.581389 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" podUID="37adecb9-a5fd-4e61-869b-4a04ac424ac0" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.12:6443/healthz\": dial tcp 10.217.0.12:6443: connect: connection refused" Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.607972 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-vd8mb" podStartSLOduration=6.607956132 podStartE2EDuration="6.607956132s" podCreationTimestamp="2026-01-31 16:31:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:37.52035661 +0000 UTC m=+145.594525279" watchObservedRunningTime="2026-01-31 16:31:37.607956132 +0000 UTC m=+145.682124801" Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.614289 4769 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-59jw4 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.33:8080/healthz\": dial tcp 10.217.0.33:8080: connect: connection refused" start-of-body= Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.614326 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-59jw4" podUID="c57a349c-3bb6-4a77-8a0a-59683f544d6d" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.33:8080/healthz\": dial tcp 
10.217.0.33:8080: connect: connection refused" Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.614905 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-g2568" event={"ID":"c2af561a-0833-4624-a950-1e1bfb2bacaa","Type":"ContainerStarted","Data":"65a5ae599a18776c239e45884272e1a86ed9f9fdc37f022dcaf767e713bc3cef"} Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.614963 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-g2568" event={"ID":"c2af561a-0833-4624-a950-1e1bfb2bacaa","Type":"ContainerStarted","Data":"4ee55ef325dc4f2b0a77523ba8275d0a9db9ddecf826e1879efd74b570de7618"} Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.615264 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-g2568" Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.624653 4769 patch_prober.go:28] interesting pod/downloads-7954f5f757-g2568 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.18:8080/\": dial tcp 10.217.0.18:8080: connect: connection refused" start-of-body= Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.624714 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-g2568" podUID="c2af561a-0833-4624-a950-1e1bfb2bacaa" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.18:8080/\": dial tcp 10.217.0.18:8080: connect: connection refused" Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.659894 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.661783 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5zm7b" event={"ID":"ae589818-e1eb-471e-ae20-018136cd7868","Type":"ContainerStarted","Data":"243087b4d738a630a60b59c205db2566ca249da3338b49ffdf1f0fc09c898941"} Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.662408 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-59jw4" podStartSLOduration=123.662371607 podStartE2EDuration="2m3.662371607s" podCreationTimestamp="2026-01-31 16:29:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:37.609438093 +0000 UTC m=+145.683606762" watchObservedRunningTime="2026-01-31 16:31:37.662371607 +0000 UTC m=+145.736540276" Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.667678 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" podStartSLOduration=123.667557821 podStartE2EDuration="2m3.667557821s" podCreationTimestamp="2026-01-31 16:29:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:37.658824559 +0000 UTC m=+145.732993228" watchObservedRunningTime="2026-01-31 16:31:37.667557821 +0000 
UTC m=+145.741726510" Jan 31 16:31:37 crc kubenswrapper[4769]: E0131 16:31:37.662660 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-31 16:31:38.162647674 +0000 UTC m=+146.236816343 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vdcnf" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.680926 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-m58s7" event={"ID":"ebf0349c-8283-4951-81be-3b3287372830","Type":"ContainerStarted","Data":"9c78d37037a6250b217fc94051cc5bd49582bafef50fd5361ce07811ba6028b2"} Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.681011 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-m58s7" event={"ID":"ebf0349c-8283-4951-81be-3b3287372830","Type":"ContainerStarted","Data":"db960ff1dc24ce1b71b35f430a7e82d3d48028d249959c3c44ee63765d0c2139"} Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.684564 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-c5wrc" event={"ID":"cbd22ccf-d447-4c9a-80bb-5d71f8725173","Type":"ContainerStarted","Data":"02e1a125d0b2dc35e702361554d719bdd02cfab1420453d30742867345101dcc"} Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.684970 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-c5wrc" event={"ID":"cbd22ccf-d447-4c9a-80bb-5d71f8725173","Type":"ContainerStarted","Data":"a3dc9e976e9e7852a46b90c3549c084aacb6945222d102c27a89c14fb66aadde"} Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.688185 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-g2568" podStartSLOduration=123.688160791 podStartE2EDuration="2m3.688160791s" podCreationTimestamp="2026-01-31 16:29:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:37.686256217 +0000 UTC m=+145.760424886" watchObservedRunningTime="2026-01-31 16:31:37.688160791 +0000 UTC m=+145.762329460" Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.742829 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sppfg" event={"ID":"32e89aaf-9047-4609-a161-4329731e4b61","Type":"ContainerStarted","Data":"9e57b58d9af0f1797d247e5db43c8b8e134f942946e9cdddbaa96883bcaffcfd"} Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.753486 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-b4pjt" event={"ID":"5007e4f2-7ccc-4511-95fe-582dee0a5a53","Type":"ContainerStarted","Data":"97c2f00c35f74790fda5c5b6d460a49fba9164a4efd09f5c94e115d46e7793e1"} Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 
16:31:37.771058 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:31:37 crc kubenswrapper[4769]: E0131 16:31:37.771685 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:31:38.271641198 +0000 UTC m=+146.345809867 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.784636 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sppfg" podStartSLOduration=123.784618397 podStartE2EDuration="2m3.784618397s" podCreationTimestamp="2026-01-31 16:29:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:37.7742105 +0000 UTC m=+145.848379169" watchObservedRunningTime="2026-01-31 16:31:37.784618397 +0000 UTC m=+145.858787066" Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.874393 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:37 crc kubenswrapper[4769]: E0131 16:31:37.874956 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-31 16:31:38.374944525 +0000 UTC m=+146.449113184 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vdcnf" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.931561 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-4pc7f" event={"ID":"a7004082-a835-4b0c-8ec7-774e94c2cdc5","Type":"ContainerStarted","Data":"2ad72cd1394113ba8effbbbf565d7c3d34c3e172f1c596556083023c6b35bae5"} Jan 31 16:31:37 crc kubenswrapper[4769]: I0131 16:31:37.966943 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-2jt6j" event={"ID":"82fed0ed-2731-404f-a5f7-1552993ecd71","Type":"ContainerStarted","Data":"e73d2390a49eb95aee0062bbc1b6a77c01a670ef2cd0e107b78ee1bb3db1866d"} Jan 31 16:31:38 crc kubenswrapper[4769]: I0131 16:31:38.032706 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-2jt6j" podStartSLOduration=124.032692348 podStartE2EDuration="2m4.032692348s" podCreationTimestamp="2026-01-31 16:29:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:38.031981638 +0000 UTC m=+146.106150307" watchObservedRunningTime="2026-01-31 16:31:38.032692348 +0000 UTC m=+146.106861007" Jan 31 16:31:38 crc kubenswrapper[4769]: I0131 16:31:38.033432 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:31:38 crc kubenswrapper[4769]: E0131 16:31:38.033797 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:31:38.533782007 +0000 UTC m=+146.607950676 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:38 crc kubenswrapper[4769]: I0131 16:31:38.034452 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-b4pjt" podStartSLOduration=124.034445296 podStartE2EDuration="2m4.034445296s" podCreationTimestamp="2026-01-31 16:29:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:37.82630346 +0000 UTC m=+145.900472129" watchObservedRunningTime="2026-01-31 16:31:38.034445296 +0000 UTC m=+146.108613965" Jan 31 16:31:38 crc kubenswrapper[4769]: I0131 16:31:38.115606 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlgn6" Jan 31 16:31:38 crc kubenswrapper[4769]: I0131 16:31:38.138795 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlgn6" Jan 31 16:31:38 crc kubenswrapper[4769]: I0131 16:31:38.139515 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:38 crc kubenswrapper[4769]: E0131 16:31:38.141302 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-31 16:31:38.64128282 +0000 UTC m=+146.715451489 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vdcnf" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:38 crc kubenswrapper[4769]: I0131 16:31:38.187373 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2027-01-31 16:26:37 +0000 UTC, rotation deadline is 2026-12-18 17:04:28.285818899 +0000 UTC Jan 31 16:31:38 crc kubenswrapper[4769]: I0131 16:31:38.187414 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 7704h32m50.098407773s for next certificate rotation Jan 31 16:31:38 crc kubenswrapper[4769]: I0131 16:31:38.240930 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:31:38 crc kubenswrapper[4769]: E0131 16:31:38.241333 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:31:38.741317927 +0000 UTC m=+146.815486596 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:38 crc kubenswrapper[4769]: I0131 16:31:38.275638 4769 patch_prober.go:28] interesting pod/router-default-5444994796-4lpmw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 31 16:31:38 crc kubenswrapper[4769]: [-]has-synced failed: reason withheld Jan 31 16:31:38 crc kubenswrapper[4769]: [+]process-running ok Jan 31 16:31:38 crc kubenswrapper[4769]: healthz check failed Jan 31 16:31:38 crc kubenswrapper[4769]: I0131 16:31:38.276002 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4lpmw" podUID="b04b6715-da98-41a2-a034-3ee53201f83d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 31 16:31:38 crc kubenswrapper[4769]: I0131 16:31:38.343613 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:38 crc kubenswrapper[4769]: E0131 16:31:38.343962 4769 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-31 16:31:38.843950185 +0000 UTC m=+146.918118854 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vdcnf" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:38 crc kubenswrapper[4769]: I0131 16:31:38.446247 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:31:38 crc kubenswrapper[4769]: E0131 16:31:38.446914 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:31:38.946898631 +0000 UTC m=+147.021067300 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:38 crc kubenswrapper[4769]: I0131 16:31:38.548937 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:38 crc kubenswrapper[4769]: E0131 16:31:38.549311 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-31 16:31:39.049297904 +0000 UTC m=+147.123466573 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vdcnf" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:38 crc kubenswrapper[4769]: I0131 16:31:38.650678 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:31:38 crc kubenswrapper[4769]: E0131 16:31:38.651005 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:31:39.150990365 +0000 UTC m=+147.225159034 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:38 crc kubenswrapper[4769]: I0131 16:31:38.735943 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlgn6" Jan 31 16:31:38 crc kubenswrapper[4769]: I0131 16:31:38.752512 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:38 crc kubenswrapper[4769]: E0131 16:31:38.752820 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-31 16:31:39.252804221 +0000 UTC m=+147.326972890 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vdcnf" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:38 crc kubenswrapper[4769]: I0131 16:31:38.854009 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:31:38 crc kubenswrapper[4769]: E0131 16:31:38.854174 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:31:39.354150113 +0000 UTC m=+147.428318782 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:38 crc kubenswrapper[4769]: I0131 16:31:38.854355 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:38 crc kubenswrapper[4769]: E0131 16:31:38.854770 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-31 16:31:39.35475667 +0000 UTC m=+147.428925339 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vdcnf" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:38 crc kubenswrapper[4769]: I0131 16:31:38.955373 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:31:38 crc kubenswrapper[4769]: E0131 16:31:38.955615 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:31:39.455588559 +0000 UTC m=+147.529757228 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:38 crc kubenswrapper[4769]: I0131 16:31:38.955863 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:38 crc kubenswrapper[4769]: E0131 16:31:38.956174 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-31 16:31:39.456166734 +0000 UTC m=+147.530335403 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vdcnf" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:38 crc kubenswrapper[4769]: I0131 16:31:38.980071 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4nngf" event={"ID":"d1bd5479-04b5-4b15-9f75-69b4d3209113","Type":"ContainerStarted","Data":"4498c57ff14348749c95ad997dbc673303d6a37109aa2e1f2f4d97bbaa8b0874"} Jan 31 16:31:38 crc kubenswrapper[4769]: I0131 16:31:38.982872 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6gsx5" event={"ID":"680ae683-4028-4d53-a078-c8c3d357f5ff","Type":"ContainerStarted","Data":"272dc71e00c971f44d9f69427b7b1b7284729be26eeee42fa29a1ff7776b054b"} Jan 31 16:31:38 crc kubenswrapper[4769]: I0131 16:31:38.983037 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6gsx5" Jan 31 16:31:38 crc kubenswrapper[4769]: I0131 16:31:38.986404 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-57kt2" event={"ID":"f29e610f-b8be-455d-8d71-bdc29b177f27","Type":"ContainerStarted","Data":"006d2257dade02464f18abe325768cb538fd15194fdfd8407a28faf07846254f"} Jan 31 16:31:38 crc kubenswrapper[4769]: I0131 16:31:38.986572 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-57kt2" Jan 31 16:31:38 crc kubenswrapper[4769]: I0131 16:31:38.989651 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9jflr" event={"ID":"f510600b-438d-4f61-970a-96d7a27e79c3","Type":"ContainerStarted","Data":"b54b0ce67dcef48498a9da479b8bf81dd797b4183970ec6ba56c446db90d6672"} Jan 31 16:31:38 crc kubenswrapper[4769]: I0131 16:31:38.993536 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nbvrj" event={"ID":"a701274a-c195-4938-96dd-9b7b7629ba16","Type":"ContainerStarted","Data":"b913727bd7ac1a9743719fcc11e5ee209ffc5f09dc824b219145111ea5c7b550"} Jan 31 16:31:38 crc kubenswrapper[4769]: I0131 16:31:38.993578 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nbvrj" event={"ID":"a701274a-c195-4938-96dd-9b7b7629ba16","Type":"ContainerStarted","Data":"96ddf0a9bab74264b9b63fcd2e8d6a07b47da837deeb6619efc2513a5ba22edb"} Jan 31 16:31:38 crc kubenswrapper[4769]: I0131 16:31:38.997147 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-57kt2" Jan 31 16:31:38 crc kubenswrapper[4769]: I0131 16:31:38.997766 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-wx75k" event={"ID":"3e62e4ba-8115-4140-b8de-07edd8c6fcfd","Type":"ContainerStarted","Data":"c11da3b805c3f36aac605bf0844874aa01a407341330e1733740b0808b236f6a"} Jan 31 16:31:38 crc 
kubenswrapper[4769]: I0131 16:31:38.999488 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-nxr6j" event={"ID":"119207e3-9106-4692-b4f1-1729e0c567cb","Type":"ContainerStarted","Data":"f4d47c81c3264513b6d137ee3977401d1302268a1b1ead6ffdabd87c70fed5c4"} Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.004364 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-pwzwp" event={"ID":"6f0058ec-2d51-4750-ba72-32b848e39402","Type":"ContainerStarted","Data":"65875db660424400cb194edd901dba34c016d2a580c6e72c0dcce856d4122f5b"} Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.010053 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-gt8bb" event={"ID":"da035601-cc1e-4442-9665-1f76bddceb51","Type":"ContainerStarted","Data":"5581d1fc962bdf2bafc9fb0a0d75d69ca922ebe0df90bf5a116adfeb66e7e0c0"} Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.011573 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5zm7b" event={"ID":"ae589818-e1eb-471e-ae20-018136cd7868","Type":"ContainerStarted","Data":"9062fe8fbe7b6667b6570ce7d6a77d2afa75595aa9669470c69103d2a4e30564"} Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.020256 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-9vxrh" event={"ID":"e3acf0c2-2cd0-4415-ab96-b3cf283af01a","Type":"ContainerStarted","Data":"44f5be60a4b13526c62a2182b7ef4dadce0a58d02621cad5aff36a27888ac3ba"} Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.049937 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4nngf" podStartSLOduration=125.049921547 podStartE2EDuration="2m5.049921547s" podCreationTimestamp="2026-01-31 16:29:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:39.04966311 +0000 UTC m=+147.123831779" watchObservedRunningTime="2026-01-31 16:31:39.049921547 +0000 UTC m=+147.124090206" Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.051613 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-c5wrc" event={"ID":"cbd22ccf-d447-4c9a-80bb-5d71f8725173","Type":"ContainerStarted","Data":"b55cb901d742243fbc7a078225e25ba900bf3a72247961fbcdf75cd91bf57494"} Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.051845 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-c5wrc" Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.056335 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:31:39 crc kubenswrapper[4769]: E0131 16:31:39.056472 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-01-31 16:31:39.556454118 +0000 UTC m=+147.630622787 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.056613 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5xmz7" event={"ID":"82336871-6c99-4ee1-9fad-2eef02dc232b","Type":"ContainerStarted","Data":"f486c32682de806d5a8cb7d77aaacc0791a6a0cf1ca5911061887ce77ccfeb06"} Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.056667 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:39 crc kubenswrapper[4769]: E0131 16:31:39.056976 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-31 16:31:39.556952541 +0000 UTC m=+147.631121200 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vdcnf" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.093195 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-4pc7f" event={"ID":"a7004082-a835-4b0c-8ec7-774e94c2cdc5","Type":"ContainerStarted","Data":"bb1958ce05349322d54d738589cb622874ae219d48dd1ec959db01d398384115"} Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.111960 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-x87ps" event={"ID":"8af0e848-a4c4-4006-9ccb-3440c5da7fc8","Type":"ContainerStarted","Data":"80cbb1060ae3263ff465479e3fb73df0e365d39ebe9b845148acafbc21dec6fd"} Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.112505 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-x87ps" Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.126656 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-p7926" event={"ID":"b962d703-4157-4cca-bdbd-107a6fd0d049","Type":"ContainerStarted","Data":"db95ea1d206aa91865ab891137c098aa1b65e43226928c2cbc9fec3e4854da1a"} Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.126700 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-p7926" 
event={"ID":"b962d703-4157-4cca-bdbd-107a6fd0d049","Type":"ContainerStarted","Data":"7f60c031ca7b6f9b6939886fd76d02584448395bbdf55213655da7b22d011d98"} Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.157908 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:31:39 crc kubenswrapper[4769]: E0131 16:31:39.160087 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:31:39.660065013 +0000 UTC m=+147.734233712 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.161002 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sn9v6" event={"ID":"52ee678e-0f7b-4106-a203-534f9fedf88f","Type":"ContainerStarted","Data":"ee91d7575946ed56cb079d2ba05cb109301347fc878f493ac7e0cc8e9eebe440"} Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.161525 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sn9v6" Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.163587 4769 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-sn9v6 container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.38:8443/healthz\": dial tcp 10.217.0.38:8443: connect: connection refused" start-of-body= Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.163624 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sn9v6" podUID="52ee678e-0f7b-4106-a203-534f9fedf88f" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.38:8443/healthz\": dial tcp 10.217.0.38:8443: connect: connection refused" Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.179616 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-57kt2" podStartSLOduration=125.179597653 podStartE2EDuration="2m5.179597653s" podCreationTimestamp="2026-01-31 16:29:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:39.111444208 +0000 UTC m=+147.185612887" watchObservedRunningTime="2026-01-31 16:31:39.179597653 +0000 UTC m=+147.253766322" Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.180891 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-nxr6j" podStartSLOduration=125.180885379 
podStartE2EDuration="2m5.180885379s" podCreationTimestamp="2026-01-31 16:29:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:39.17915381 +0000 UTC m=+147.253322479" watchObservedRunningTime="2026-01-31 16:31:39.180885379 +0000 UTC m=+147.255054048" Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.192916 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-m58s7" event={"ID":"ebf0349c-8283-4951-81be-3b3287372830","Type":"ContainerStarted","Data":"d23fb8281c3e361a643d2278d99aae542b8faaf1d24535ef4d6c4ac2930b71ec"} Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.214386 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-v5mpd" Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.215960 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-gwhw8" event={"ID":"8f4fa765-6c0a-455a-8179-001a1b59ca21","Type":"ContainerStarted","Data":"1ae2fc34a05d8e48e96ab1956c1d413b87a3ce9c62a225040e2c55e35721ca96"} Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.215989 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-gwhw8" event={"ID":"8f4fa765-6c0a-455a-8179-001a1b59ca21","Type":"ContainerStarted","Data":"a831922a378f39cd9b6d3876e148bd567c00113d471efa9e166e34dacd2a4cd1"} Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.234743 4769 patch_prober.go:28] interesting pod/downloads-7954f5f757-g2568 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.18:8080/\": dial tcp 10.217.0.18:8080: connect: connection refused" start-of-body= Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.234805 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-g2568" podUID="c2af561a-0833-4624-a950-1e1bfb2bacaa" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.18:8080/\": dial tcp 10.217.0.18:8080: connect: connection refused" Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.238876 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-wx75k" podStartSLOduration=125.238855912 podStartE2EDuration="2m5.238855912s" podCreationTimestamp="2026-01-31 16:29:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:39.23192959 +0000 UTC m=+147.306098249" watchObservedRunningTime="2026-01-31 16:31:39.238855912 +0000 UTC m=+147.313024581" Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.244510 4769 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-59jw4 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.33:8080/healthz\": dial tcp 10.217.0.33:8080: connect: connection refused" start-of-body= Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.244555 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-59jw4" podUID="c57a349c-3bb6-4a77-8a0a-59683f544d6d" containerName="marketplace-operator" probeResult="failure" output="Get 
\"http://10.217.0.33:8080/healthz\": dial tcp 10.217.0.33:8080: connect: connection refused" Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.253744 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlgn6" Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.260992 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:39 crc kubenswrapper[4769]: E0131 16:31:39.263182 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-31 16:31:39.763167364 +0000 UTC m=+147.837336033 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vdcnf" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.269216 4769 patch_prober.go:28] interesting pod/router-default-5444994796-4lpmw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 31 16:31:39 crc kubenswrapper[4769]: [-]has-synced failed: reason withheld Jan 31 16:31:39 crc kubenswrapper[4769]: [+]process-running ok Jan 31 16:31:39 crc kubenswrapper[4769]: healthz check failed Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.269309 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4lpmw" podUID="b04b6715-da98-41a2-a034-3ee53201f83d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.275219 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6gsx5" podStartSLOduration=125.275195417 podStartE2EDuration="2m5.275195417s" podCreationTimestamp="2026-01-31 16:29:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:39.269948181 +0000 UTC m=+147.344116860" watchObservedRunningTime="2026-01-31 16:31:39.275195417 +0000 UTC m=+147.349364086" Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.310739 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-pwzwp" podStartSLOduration=125.310718839 podStartE2EDuration="2m5.310718839s" podCreationTimestamp="2026-01-31 16:29:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:39.302979665 +0000 UTC m=+147.377148334" watchObservedRunningTime="2026-01-31 16:31:39.310718839 +0000 UTC 
m=+147.384887508" Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.327310 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5zm7b" podStartSLOduration=125.327292757 podStartE2EDuration="2m5.327292757s" podCreationTimestamp="2026-01-31 16:29:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:39.326069603 +0000 UTC m=+147.400238272" watchObservedRunningTime="2026-01-31 16:31:39.327292757 +0000 UTC m=+147.401461426" Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.368036 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:31:39 crc kubenswrapper[4769]: E0131 16:31:39.370403 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:31:39.870378649 +0000 UTC m=+147.944547408 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.405013 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-9jflr" podStartSLOduration=125.404989586 podStartE2EDuration="2m5.404989586s" podCreationTimestamp="2026-01-31 16:29:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:39.378460943 +0000 UTC m=+147.452629612" watchObservedRunningTime="2026-01-31 16:31:39.404989586 +0000 UTC m=+147.479158255" Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.406912 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-gt8bb" podStartSLOduration=125.406907479 podStartE2EDuration="2m5.406907479s" podCreationTimestamp="2026-01-31 16:29:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:39.404135352 +0000 UTC m=+147.478304021" watchObservedRunningTime="2026-01-31 16:31:39.406907479 +0000 UTC m=+147.481076148" Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.469921 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " 
pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:39 crc kubenswrapper[4769]: E0131 16:31:39.470272 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-31 16:31:39.970260981 +0000 UTC m=+148.044429650 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vdcnf" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.489971 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nbvrj" podStartSLOduration=125.489953475 podStartE2EDuration="2m5.489953475s" podCreationTimestamp="2026-01-31 16:29:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:39.457410925 +0000 UTC m=+147.531579594" watchObservedRunningTime="2026-01-31 16:31:39.489953475 +0000 UTC m=+147.564122154" Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.492155 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-9vxrh" podStartSLOduration=8.492149806 podStartE2EDuration="8.492149806s" podCreationTimestamp="2026-01-31 16:31:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:39.488877136 +0000 UTC m=+147.563045815" watchObservedRunningTime="2026-01-31 16:31:39.492149806 +0000 UTC m=+147.566318475" Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.571973 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:31:39 crc kubenswrapper[4769]: E0131 16:31:39.572368 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:31:40.072352984 +0000 UTC m=+148.146521653 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.619697 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-p7926" podStartSLOduration=125.619682173 podStartE2EDuration="2m5.619682173s" podCreationTimestamp="2026-01-31 16:29:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:39.572664942 +0000 UTC m=+147.646833601" watchObservedRunningTime="2026-01-31 16:31:39.619682173 +0000 UTC m=+147.693850842" Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.641243 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.676280 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:39 crc kubenswrapper[4769]: E0131 16:31:39.676697 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-31 16:31:40.176684679 +0000 UTC m=+148.250853348 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vdcnf" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.705002 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-c5wrc" podStartSLOduration=125.704985922 podStartE2EDuration="2m5.704985922s" podCreationTimestamp="2026-01-31 16:29:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:39.703822089 +0000 UTC m=+147.777990758" watchObservedRunningTime="2026-01-31 16:31:39.704985922 +0000 UTC m=+147.779154591" Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.705748 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sn9v6" podStartSLOduration=125.705743442 podStartE2EDuration="2m5.705743442s" podCreationTimestamp="2026-01-31 16:29:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:39.661781117 +0000 UTC m=+147.735949786" watchObservedRunningTime="2026-01-31 16:31:39.705743442 +0000 UTC m=+147.779912101" Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.751093 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-5xmz7" podStartSLOduration=125.751078977 podStartE2EDuration="2m5.751078977s" podCreationTimestamp="2026-01-31 16:29:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:39.74975924 +0000 UTC m=+147.823927909" watchObservedRunningTime="2026-01-31 16:31:39.751078977 +0000 UTC m=+147.825247646" Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.777129 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:31:39 crc kubenswrapper[4769]: E0131 16:31:39.777548 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:31:40.277532378 +0000 UTC m=+148.351701047 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.796307 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-gwhw8" podStartSLOduration=125.796288906 podStartE2EDuration="2m5.796288906s" podCreationTimestamp="2026-01-31 16:29:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:39.794324352 +0000 UTC m=+147.868493031" watchObservedRunningTime="2026-01-31 16:31:39.796288906 +0000 UTC m=+147.870457565" Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.879024 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:39 crc kubenswrapper[4769]: E0131 16:31:39.879306 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-31 16:31:40.379295171 +0000 UTC m=+148.453463830 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vdcnf" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.893917 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-m58s7" podStartSLOduration=126.893899276 podStartE2EDuration="2m6.893899276s" podCreationTimestamp="2026-01-31 16:29:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:39.892060925 +0000 UTC m=+147.966229594" watchObservedRunningTime="2026-01-31 16:31:39.893899276 +0000 UTC m=+147.968067945" Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.962486 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-x87ps" podStartSLOduration=8.962468662 podStartE2EDuration="8.962468662s" podCreationTimestamp="2026-01-31 16:31:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:39.917023025 +0000 UTC m=+147.991191694" watchObservedRunningTime="2026-01-31 16:31:39.962468662 +0000 UTC m=+148.036637331" Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.980088 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:31:39 crc kubenswrapper[4769]: E0131 16:31:39.980469 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:31:40.48045408 +0000 UTC m=+148.554622749 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.983581 4769 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-6gsx5 container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.23:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 31 16:31:39 crc kubenswrapper[4769]: I0131 16:31:39.986046 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6gsx5" podUID="680ae683-4028-4d53-a078-c8c3d357f5ff" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.23:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 31 16:31:40 crc kubenswrapper[4769]: I0131 16:31:40.082145 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:40 crc kubenswrapper[4769]: E0131 16:31:40.082437 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-31 16:31:40.58242645 +0000 UTC m=+148.656595119 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vdcnf" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:40 crc kubenswrapper[4769]: I0131 16:31:40.182968 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:31:40 crc kubenswrapper[4769]: E0131 16:31:40.183116 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:31:40.683078462 +0000 UTC m=+148.757247131 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:40 crc kubenswrapper[4769]: I0131 16:31:40.183212 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:40 crc kubenswrapper[4769]: E0131 16:31:40.183566 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-31 16:31:40.683558136 +0000 UTC m=+148.757726805 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vdcnf" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:40 crc kubenswrapper[4769]: I0131 16:31:40.253433 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-4pc7f" event={"ID":"a7004082-a835-4b0c-8ec7-774e94c2cdc5","Type":"ContainerStarted","Data":"6374857e8da16bf27f9bb49136617cd91a32d18cf0d573a137b4dd8789e67c3e"} Jan 31 16:31:40 crc kubenswrapper[4769]: I0131 16:31:40.253514 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-4pc7f" event={"ID":"a7004082-a835-4b0c-8ec7-774e94c2cdc5","Type":"ContainerStarted","Data":"2deb15644ded6b99d13f783ec25c9e20513bff1ce2554c0a07d5ba3ea852a357"} Jan 31 16:31:40 crc kubenswrapper[4769]: I0131 16:31:40.254741 4769 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-59jw4 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.33:8080/healthz\": dial tcp 10.217.0.33:8080: connect: connection refused" start-of-body= Jan 31 16:31:40 crc kubenswrapper[4769]: I0131 16:31:40.254793 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-59jw4" podUID="c57a349c-3bb6-4a77-8a0a-59683f544d6d" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.33:8080/healthz\": dial tcp 10.217.0.33:8080: connect: connection refused" Jan 31 16:31:40 crc kubenswrapper[4769]: I0131 16:31:40.273040 4769 patch_prober.go:28] interesting pod/router-default-5444994796-4lpmw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 31 16:31:40 crc kubenswrapper[4769]: [-]has-synced failed: reason withheld Jan 31 16:31:40 crc kubenswrapper[4769]: 
[+]process-running ok Jan 31 16:31:40 crc kubenswrapper[4769]: healthz check failed Jan 31 16:31:40 crc kubenswrapper[4769]: I0131 16:31:40.273088 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4lpmw" podUID="b04b6715-da98-41a2-a034-3ee53201f83d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 31 16:31:40 crc kubenswrapper[4769]: I0131 16:31:40.284907 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:31:40 crc kubenswrapper[4769]: E0131 16:31:40.285275 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:31:40.785238047 +0000 UTC m=+148.859406716 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:40 crc kubenswrapper[4769]: I0131 16:31:40.308782 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sn9v6" Jan 31 16:31:40 crc kubenswrapper[4769]: I0131 16:31:40.407710 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:40 crc kubenswrapper[4769]: E0131 16:31:40.410323 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-31 16:31:40.910307626 +0000 UTC m=+148.984476295 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vdcnf" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:40 crc kubenswrapper[4769]: I0131 16:31:40.448338 4769 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Jan 31 16:31:40 crc kubenswrapper[4769]: I0131 16:31:40.508951 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:31:40 crc kubenswrapper[4769]: E0131 16:31:40.509297 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:31:41.009280553 +0000 UTC m=+149.083449212 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:40 crc kubenswrapper[4769]: I0131 16:31:40.610255 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:40 crc kubenswrapper[4769]: E0131 16:31:40.610644 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-31 16:31:41.110627716 +0000 UTC m=+149.184796385 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vdcnf" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:40 crc kubenswrapper[4769]: I0131 16:31:40.710868 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:31:40 crc kubenswrapper[4769]: E0131 16:31:40.711048 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:31:41.211024851 +0000 UTC m=+149.285193510 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:40 crc kubenswrapper[4769]: I0131 16:31:40.711182 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:31:40 crc kubenswrapper[4769]: I0131 16:31:40.711231 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:31:40 crc kubenswrapper[4769]: I0131 16:31:40.711345 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:40 crc kubenswrapper[4769]: E0131 16:31:40.711636 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-31 16:31:41.211629019 +0000 UTC m=+149.285797688 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vdcnf" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:40 crc kubenswrapper[4769]: I0131 16:31:40.712682 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:31:40 crc kubenswrapper[4769]: I0131 16:31:40.713937 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6gsx5" Jan 31 16:31:40 crc kubenswrapper[4769]: I0131 16:31:40.725771 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-hvfw2"] Jan 31 16:31:40 crc kubenswrapper[4769]: I0131 16:31:40.726692 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hvfw2" Jan 31 16:31:40 crc kubenswrapper[4769]: I0131 16:31:40.728963 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:31:40 crc kubenswrapper[4769]: I0131 16:31:40.734304 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 31 16:31:40 crc kubenswrapper[4769]: I0131 16:31:40.742749 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 31 16:31:40 crc kubenswrapper[4769]: I0131 16:31:40.749390 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-hvfw2"] Jan 31 16:31:40 crc kubenswrapper[4769]: I0131 16:31:40.812604 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:31:40 crc kubenswrapper[4769]: I0131 16:31:40.812974 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:31:40 crc kubenswrapper[4769]: I0131 16:31:40.813019 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:31:40 crc kubenswrapper[4769]: E0131 16:31:40.813697 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:31:41.313680511 +0000 UTC m=+149.387849180 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:40 crc kubenswrapper[4769]: I0131 16:31:40.817911 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:31:40 crc kubenswrapper[4769]: I0131 16:31:40.818294 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:31:40 crc kubenswrapper[4769]: I0131 16:31:40.844003 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:31:40 crc kubenswrapper[4769]: I0131 16:31:40.914231 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58ef25f8-8447-418b-a590-c964242d9336-utilities\") pod \"certified-operators-hvfw2\" (UID: \"58ef25f8-8447-418b-a590-c964242d9336\") " pod="openshift-marketplace/certified-operators-hvfw2" Jan 31 16:31:40 crc kubenswrapper[4769]: I0131 16:31:40.914276 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58ef25f8-8447-418b-a590-c964242d9336-catalog-content\") pod \"certified-operators-hvfw2\" (UID: \"58ef25f8-8447-418b-a590-c964242d9336\") " pod="openshift-marketplace/certified-operators-hvfw2" Jan 31 16:31:40 crc kubenswrapper[4769]: I0131 16:31:40.914307 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:40 crc kubenswrapper[4769]: I0131 16:31:40.914323 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zlv5m\" (UniqueName: \"kubernetes.io/projected/58ef25f8-8447-418b-a590-c964242d9336-kube-api-access-zlv5m\") pod \"certified-operators-hvfw2\" (UID: \"58ef25f8-8447-418b-a590-c964242d9336\") " pod="openshift-marketplace/certified-operators-hvfw2" Jan 31 16:31:40 crc kubenswrapper[4769]: E0131 16:31:40.914616 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-31 16:31:41.414602932 +0000 UTC m=+149.488771601 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vdcnf" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:40 crc kubenswrapper[4769]: I0131 16:31:40.918421 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-sn7bc"] Jan 31 16:31:40 crc kubenswrapper[4769]: I0131 16:31:40.919367 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-sn7bc" Jan 31 16:31:40 crc kubenswrapper[4769]: I0131 16:31:40.921477 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 31 16:31:40 crc kubenswrapper[4769]: I0131 16:31:40.941528 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-sn7bc"] Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.023189 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.023359 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58ef25f8-8447-418b-a590-c964242d9336-utilities\") pod \"certified-operators-hvfw2\" (UID: \"58ef25f8-8447-418b-a590-c964242d9336\") " pod="openshift-marketplace/certified-operators-hvfw2" Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.023392 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e497cac-4dc0-4166-af7d-768713cd0bb8-utilities\") pod \"community-operators-sn7bc\" (UID: \"4e497cac-4dc0-4166-af7d-768713cd0bb8\") " pod="openshift-marketplace/community-operators-sn7bc" Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.023416 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58ef25f8-8447-418b-a590-c964242d9336-catalog-content\") pod \"certified-operators-hvfw2\" (UID: \"58ef25f8-8447-418b-a590-c964242d9336\") " pod="openshift-marketplace/certified-operators-hvfw2" Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.023452 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zlv5m\" (UniqueName: \"kubernetes.io/projected/58ef25f8-8447-418b-a590-c964242d9336-kube-api-access-zlv5m\") pod \"certified-operators-hvfw2\" (UID: \"58ef25f8-8447-418b-a590-c964242d9336\") " pod="openshift-marketplace/certified-operators-hvfw2" Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.023475 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6w274\" (UniqueName: \"kubernetes.io/projected/4e497cac-4dc0-4166-af7d-768713cd0bb8-kube-api-access-6w274\") pod \"community-operators-sn7bc\" (UID: \"4e497cac-4dc0-4166-af7d-768713cd0bb8\") " pod="openshift-marketplace/community-operators-sn7bc" Jan 31 16:31:41 crc kubenswrapper[4769]: E0131 16:31:41.023539 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-31 16:31:41.523513503 +0000 UTC m=+149.597682172 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.023600 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e497cac-4dc0-4166-af7d-768713cd0bb8-catalog-content\") pod \"community-operators-sn7bc\" (UID: \"4e497cac-4dc0-4166-af7d-768713cd0bb8\") " pod="openshift-marketplace/community-operators-sn7bc" Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.024278 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58ef25f8-8447-418b-a590-c964242d9336-catalog-content\") pod \"certified-operators-hvfw2\" (UID: \"58ef25f8-8447-418b-a590-c964242d9336\") " pod="openshift-marketplace/certified-operators-hvfw2" Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.025489 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58ef25f8-8447-418b-a590-c964242d9336-utilities\") pod \"certified-operators-hvfw2\" (UID: \"58ef25f8-8447-418b-a590-c964242d9336\") " pod="openshift-marketplace/certified-operators-hvfw2" Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.030353 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.051428 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zlv5m\" (UniqueName: \"kubernetes.io/projected/58ef25f8-8447-418b-a590-c964242d9336-kube-api-access-zlv5m\") pod \"certified-operators-hvfw2\" (UID: \"58ef25f8-8447-418b-a590-c964242d9336\") " pod="openshift-marketplace/certified-operators-hvfw2" Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.114640 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-dkmqk"] Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.115602 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-dkmqk" Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.127781 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e497cac-4dc0-4166-af7d-768713cd0bb8-catalog-content\") pod \"community-operators-sn7bc\" (UID: \"4e497cac-4dc0-4166-af7d-768713cd0bb8\") " pod="openshift-marketplace/community-operators-sn7bc" Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.127825 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nztbf\" (UniqueName: \"kubernetes.io/projected/3d149dc3-950d-4f91-b78f-d4469a197742-kube-api-access-nztbf\") pod \"certified-operators-dkmqk\" (UID: \"3d149dc3-950d-4f91-b78f-d4469a197742\") " pod="openshift-marketplace/certified-operators-dkmqk" Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.127857 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3d149dc3-950d-4f91-b78f-d4469a197742-utilities\") pod \"certified-operators-dkmqk\" (UID: \"3d149dc3-950d-4f91-b78f-d4469a197742\") " pod="openshift-marketplace/certified-operators-dkmqk" Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.127896 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3d149dc3-950d-4f91-b78f-d4469a197742-catalog-content\") pod \"certified-operators-dkmqk\" (UID: \"3d149dc3-950d-4f91-b78f-d4469a197742\") " pod="openshift-marketplace/certified-operators-dkmqk" Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.127935 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e497cac-4dc0-4166-af7d-768713cd0bb8-utilities\") pod \"community-operators-sn7bc\" (UID: \"4e497cac-4dc0-4166-af7d-768713cd0bb8\") " pod="openshift-marketplace/community-operators-sn7bc" Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.127981 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.128013 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6w274\" (UniqueName: \"kubernetes.io/projected/4e497cac-4dc0-4166-af7d-768713cd0bb8-kube-api-access-6w274\") pod \"community-operators-sn7bc\" (UID: \"4e497cac-4dc0-4166-af7d-768713cd0bb8\") " pod="openshift-marketplace/community-operators-sn7bc" Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.128962 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dkmqk"] Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.129028 4769 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2026-01-31T16:31:40.448360509Z","Handler":null,"Name":""} Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.131589 4769 csi_plugin.go:100] kubernetes.io/csi: 
Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.131628 4769 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.132598 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e497cac-4dc0-4166-af7d-768713cd0bb8-utilities\") pod \"community-operators-sn7bc\" (UID: \"4e497cac-4dc0-4166-af7d-768713cd0bb8\") " pod="openshift-marketplace/community-operators-sn7bc" Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.132784 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e497cac-4dc0-4166-af7d-768713cd0bb8-catalog-content\") pod \"community-operators-sn7bc\" (UID: \"4e497cac-4dc0-4166-af7d-768713cd0bb8\") " pod="openshift-marketplace/community-operators-sn7bc" Jan 31 16:31:41 crc kubenswrapper[4769]: E0131 16:31:41.134473 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-31 16:31:41.634446501 +0000 UTC m=+149.708615170 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-vdcnf" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.164327 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6w274\" (UniqueName: \"kubernetes.io/projected/4e497cac-4dc0-4166-af7d-768713cd0bb8-kube-api-access-6w274\") pod \"community-operators-sn7bc\" (UID: \"4e497cac-4dc0-4166-af7d-768713cd0bb8\") " pod="openshift-marketplace/community-operators-sn7bc" Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.228637 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.228764 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nztbf\" (UniqueName: \"kubernetes.io/projected/3d149dc3-950d-4f91-b78f-d4469a197742-kube-api-access-nztbf\") pod \"certified-operators-dkmqk\" (UID: \"3d149dc3-950d-4f91-b78f-d4469a197742\") " pod="openshift-marketplace/certified-operators-dkmqk" Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.228788 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3d149dc3-950d-4f91-b78f-d4469a197742-utilities\") pod \"certified-operators-dkmqk\" (UID: \"3d149dc3-950d-4f91-b78f-d4469a197742\") " 
pod="openshift-marketplace/certified-operators-dkmqk" Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.228822 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3d149dc3-950d-4f91-b78f-d4469a197742-catalog-content\") pod \"certified-operators-dkmqk\" (UID: \"3d149dc3-950d-4f91-b78f-d4469a197742\") " pod="openshift-marketplace/certified-operators-dkmqk" Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.229254 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3d149dc3-950d-4f91-b78f-d4469a197742-catalog-content\") pod \"certified-operators-dkmqk\" (UID: \"3d149dc3-950d-4f91-b78f-d4469a197742\") " pod="openshift-marketplace/certified-operators-dkmqk" Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.229465 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3d149dc3-950d-4f91-b78f-d4469a197742-utilities\") pod \"certified-operators-dkmqk\" (UID: \"3d149dc3-950d-4f91-b78f-d4469a197742\") " pod="openshift-marketplace/certified-operators-dkmqk" Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.234170 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.243100 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nztbf\" (UniqueName: \"kubernetes.io/projected/3d149dc3-950d-4f91-b78f-d4469a197742-kube-api-access-nztbf\") pod \"certified-operators-dkmqk\" (UID: \"3d149dc3-950d-4f91-b78f-d4469a197742\") " pod="openshift-marketplace/certified-operators-dkmqk" Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.261480 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-4pc7f" event={"ID":"a7004082-a835-4b0c-8ec7-774e94c2cdc5","Type":"ContainerStarted","Data":"849e126e4cff82ac8e0381370c82e29b3a8d9b92b1cfa4d458cc711595dff861"} Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.267402 4769 patch_prober.go:28] interesting pod/router-default-5444994796-4lpmw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 31 16:31:41 crc kubenswrapper[4769]: [-]has-synced failed: reason withheld Jan 31 16:31:41 crc kubenswrapper[4769]: [+]process-running ok Jan 31 16:31:41 crc kubenswrapper[4769]: healthz check failed Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.267465 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4lpmw" podUID="b04b6715-da98-41a2-a034-3ee53201f83d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.277978 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-4pc7f" podStartSLOduration=10.277961019 podStartE2EDuration="10.277961019s" 
podCreationTimestamp="2026-01-31 16:31:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:41.276286853 +0000 UTC m=+149.350455522" watchObservedRunningTime="2026-01-31 16:31:41.277961019 +0000 UTC m=+149.352129688" Jan 31 16:31:41 crc kubenswrapper[4769]: W0131 16:31:41.287655 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5fe485a1_e14f_4c09_b5b9_f252bc42b7e8.slice/crio-5cc1c19a241edb363c16c4361cce197f4f175960d4385971bb95076a994a2ea0 WatchSource:0}: Error finding container 5cc1c19a241edb363c16c4361cce197f4f175960d4385971bb95076a994a2ea0: Status 404 returned error can't find the container with id 5cc1c19a241edb363c16c4361cce197f4f175960d4385971bb95076a994a2ea0 Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.293488 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-sn7bc" Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.308838 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-n6pd8"] Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.309812 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-n6pd8" Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.319507 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-n6pd8"] Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.330198 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52d66af3-ac8c-4ea4-9994-eeeaa65513cc-catalog-content\") pod \"community-operators-n6pd8\" (UID: \"52d66af3-ac8c-4ea4-9994-eeeaa65513cc\") " pod="openshift-marketplace/community-operators-n6pd8" Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.330276 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qmt9n\" (UniqueName: \"kubernetes.io/projected/52d66af3-ac8c-4ea4-9994-eeeaa65513cc-kube-api-access-qmt9n\") pod \"community-operators-n6pd8\" (UID: \"52d66af3-ac8c-4ea4-9994-eeeaa65513cc\") " pod="openshift-marketplace/community-operators-n6pd8" Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.330348 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.330383 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52d66af3-ac8c-4ea4-9994-eeeaa65513cc-utilities\") pod \"community-operators-n6pd8\" (UID: \"52d66af3-ac8c-4ea4-9994-eeeaa65513cc\") " pod="openshift-marketplace/community-operators-n6pd8" Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.335990 4769 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.336027 4769 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.346947 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hvfw2" Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.395728 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-vdcnf\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.413766 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.436816 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dkmqk" Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.438093 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52d66af3-ac8c-4ea4-9994-eeeaa65513cc-catalog-content\") pod \"community-operators-n6pd8\" (UID: \"52d66af3-ac8c-4ea4-9994-eeeaa65513cc\") " pod="openshift-marketplace/community-operators-n6pd8" Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.438145 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qmt9n\" (UniqueName: \"kubernetes.io/projected/52d66af3-ac8c-4ea4-9994-eeeaa65513cc-kube-api-access-qmt9n\") pod \"community-operators-n6pd8\" (UID: \"52d66af3-ac8c-4ea4-9994-eeeaa65513cc\") " pod="openshift-marketplace/community-operators-n6pd8" Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.438186 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52d66af3-ac8c-4ea4-9994-eeeaa65513cc-utilities\") pod \"community-operators-n6pd8\" (UID: \"52d66af3-ac8c-4ea4-9994-eeeaa65513cc\") " pod="openshift-marketplace/community-operators-n6pd8" Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.438678 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52d66af3-ac8c-4ea4-9994-eeeaa65513cc-utilities\") pod \"community-operators-n6pd8\" (UID: \"52d66af3-ac8c-4ea4-9994-eeeaa65513cc\") " pod="openshift-marketplace/community-operators-n6pd8" Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.438926 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52d66af3-ac8c-4ea4-9994-eeeaa65513cc-catalog-content\") pod \"community-operators-n6pd8\" (UID: 
\"52d66af3-ac8c-4ea4-9994-eeeaa65513cc\") " pod="openshift-marketplace/community-operators-n6pd8" Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.463619 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qmt9n\" (UniqueName: \"kubernetes.io/projected/52d66af3-ac8c-4ea4-9994-eeeaa65513cc-kube-api-access-qmt9n\") pod \"community-operators-n6pd8\" (UID: \"52d66af3-ac8c-4ea4-9994-eeeaa65513cc\") " pod="openshift-marketplace/community-operators-n6pd8" Jan 31 16:31:41 crc kubenswrapper[4769]: W0131 16:31:41.526291 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d751cbb_f2e2_430d_9754_c882a5e924a5.slice/crio-60ed3d9e2141e71564b9f1650b65b8e73d6a11125cf9a7695cff319a0928f4ce WatchSource:0}: Error finding container 60ed3d9e2141e71564b9f1650b65b8e73d6a11125cf9a7695cff319a0928f4ce: Status 404 returned error can't find the container with id 60ed3d9e2141e71564b9f1650b65b8e73d6a11125cf9a7695cff319a0928f4ce Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.627808 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-n6pd8" Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.653434 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-sn7bc"] Jan 31 16:31:41 crc kubenswrapper[4769]: W0131 16:31:41.666675 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4e497cac_4dc0_4166_af7d_768713cd0bb8.slice/crio-68ffef9f6a96c7c77ed78ddbce70e788b9f1c1005b22e03a4c89519a5fcd1c41 WatchSource:0}: Error finding container 68ffef9f6a96c7c77ed78ddbce70e788b9f1c1005b22e03a4c89519a5fcd1c41: Status 404 returned error can't find the container with id 68ffef9f6a96c7c77ed78ddbce70e788b9f1c1005b22e03a4c89519a5fcd1c41 Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.711612 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-hvfw2"] Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.875392 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-n6pd8"] Jan 31 16:31:41 crc kubenswrapper[4769]: W0131 16:31:41.918083 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod52d66af3_ac8c_4ea4_9994_eeeaa65513cc.slice/crio-ae448425c3138cc4081a67f18146f1db8867909d1f3c27cb528559f14e1e3ebe WatchSource:0}: Error finding container ae448425c3138cc4081a67f18146f1db8867909d1f3c27cb528559f14e1e3ebe: Status 404 returned error can't find the container with id ae448425c3138cc4081a67f18146f1db8867909d1f3c27cb528559f14e1e3ebe Jan 31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.979996 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-vdcnf"] Jan 31 16:31:41 crc kubenswrapper[4769]: W0131 16:31:41.985251 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4494904a_b7f5_4141_8a63_3360e03bc528.slice/crio-4a6e46ae6cd8c1772a959e2053add752cf78bcaad102acfcb8b9e8384eba523e WatchSource:0}: Error finding container 4a6e46ae6cd8c1772a959e2053add752cf78bcaad102acfcb8b9e8384eba523e: Status 404 returned error can't find the container with id 4a6e46ae6cd8c1772a959e2053add752cf78bcaad102acfcb8b9e8384eba523e Jan 
31 16:31:41 crc kubenswrapper[4769]: I0131 16:31:41.990660 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dkmqk"] Jan 31 16:31:42 crc kubenswrapper[4769]: W0131 16:31:42.010024 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3d149dc3_950d_4f91_b78f_d4469a197742.slice/crio-25170940a796171a5fcab55c679cef637333e8aa4a674075871b3577abefe1e1 WatchSource:0}: Error finding container 25170940a796171a5fcab55c679cef637333e8aa4a674075871b3577abefe1e1: Status 404 returned error can't find the container with id 25170940a796171a5fcab55c679cef637333e8aa4a674075871b3577abefe1e1 Jan 31 16:31:42 crc kubenswrapper[4769]: I0131 16:31:42.267742 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"e042ab7b2e860fbe874d47c3c147d0296e6857a92629d1aebc349db35a2de8e4"} Jan 31 16:31:42 crc kubenswrapper[4769]: I0131 16:31:42.268001 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"898f45496472e70cac5ed68f8548c85adfaf53f76e5cdd307e1188be9f15bb08"} Jan 31 16:31:42 crc kubenswrapper[4769]: I0131 16:31:42.268167 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:31:42 crc kubenswrapper[4769]: I0131 16:31:42.269286 4769 generic.go:334] "Generic (PLEG): container finished" podID="58ef25f8-8447-418b-a590-c964242d9336" containerID="c8498f9fe8a1dd2db3064cba3e342011563eddf5d78c3da3dedabf9870a1705a" exitCode=0 Jan 31 16:31:42 crc kubenswrapper[4769]: I0131 16:31:42.269384 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hvfw2" event={"ID":"58ef25f8-8447-418b-a590-c964242d9336","Type":"ContainerDied","Data":"c8498f9fe8a1dd2db3064cba3e342011563eddf5d78c3da3dedabf9870a1705a"} Jan 31 16:31:42 crc kubenswrapper[4769]: I0131 16:31:42.269402 4769 patch_prober.go:28] interesting pod/router-default-5444994796-4lpmw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 31 16:31:42 crc kubenswrapper[4769]: [-]has-synced failed: reason withheld Jan 31 16:31:42 crc kubenswrapper[4769]: [+]process-running ok Jan 31 16:31:42 crc kubenswrapper[4769]: healthz check failed Jan 31 16:31:42 crc kubenswrapper[4769]: I0131 16:31:42.269432 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hvfw2" event={"ID":"58ef25f8-8447-418b-a590-c964242d9336","Type":"ContainerStarted","Data":"3e18f71a17573c51705aae1d0853367401f57b0a486cfb7b0bfe6944bc64afcb"} Jan 31 16:31:42 crc kubenswrapper[4769]: I0131 16:31:42.269440 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4lpmw" podUID="b04b6715-da98-41a2-a034-3ee53201f83d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 31 16:31:42 crc kubenswrapper[4769]: I0131 16:31:42.270829 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" 
event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"455a517ca75f96191d5396b6e1bb98d70dda72c4f892b261855520320366bdf2"} Jan 31 16:31:42 crc kubenswrapper[4769]: I0131 16:31:42.270858 4769 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 31 16:31:42 crc kubenswrapper[4769]: I0131 16:31:42.270871 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"60ed3d9e2141e71564b9f1650b65b8e73d6a11125cf9a7695cff319a0928f4ce"} Jan 31 16:31:42 crc kubenswrapper[4769]: I0131 16:31:42.275558 4769 generic.go:334] "Generic (PLEG): container finished" podID="4e497cac-4dc0-4166-af7d-768713cd0bb8" containerID="28e91a17df67da6449edb19013a9768bd36766090be428eed778c42b4af19b87" exitCode=0 Jan 31 16:31:42 crc kubenswrapper[4769]: I0131 16:31:42.275614 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sn7bc" event={"ID":"4e497cac-4dc0-4166-af7d-768713cd0bb8","Type":"ContainerDied","Data":"28e91a17df67da6449edb19013a9768bd36766090be428eed778c42b4af19b87"} Jan 31 16:31:42 crc kubenswrapper[4769]: I0131 16:31:42.275630 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sn7bc" event={"ID":"4e497cac-4dc0-4166-af7d-768713cd0bb8","Type":"ContainerStarted","Data":"68ffef9f6a96c7c77ed78ddbce70e788b9f1c1005b22e03a4c89519a5fcd1c41"} Jan 31 16:31:42 crc kubenswrapper[4769]: I0131 16:31:42.279933 4769 generic.go:334] "Generic (PLEG): container finished" podID="52d66af3-ac8c-4ea4-9994-eeeaa65513cc" containerID="ea4dd0e115b8278d7efd880046fa9567e17d327c7676f0cdbe785496c802a0ff" exitCode=0 Jan 31 16:31:42 crc kubenswrapper[4769]: I0131 16:31:42.279991 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n6pd8" event={"ID":"52d66af3-ac8c-4ea4-9994-eeeaa65513cc","Type":"ContainerDied","Data":"ea4dd0e115b8278d7efd880046fa9567e17d327c7676f0cdbe785496c802a0ff"} Jan 31 16:31:42 crc kubenswrapper[4769]: I0131 16:31:42.280012 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n6pd8" event={"ID":"52d66af3-ac8c-4ea4-9994-eeeaa65513cc","Type":"ContainerStarted","Data":"ae448425c3138cc4081a67f18146f1db8867909d1f3c27cb528559f14e1e3ebe"} Jan 31 16:31:42 crc kubenswrapper[4769]: I0131 16:31:42.288177 4769 generic.go:334] "Generic (PLEG): container finished" podID="3d149dc3-950d-4f91-b78f-d4469a197742" containerID="34783706c879babaccf58564c0546194acaed7cb646f1d99d1539ea1e9474ac3" exitCode=0 Jan 31 16:31:42 crc kubenswrapper[4769]: I0131 16:31:42.288268 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dkmqk" event={"ID":"3d149dc3-950d-4f91-b78f-d4469a197742","Type":"ContainerDied","Data":"34783706c879babaccf58564c0546194acaed7cb646f1d99d1539ea1e9474ac3"} Jan 31 16:31:42 crc kubenswrapper[4769]: I0131 16:31:42.288307 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dkmqk" event={"ID":"3d149dc3-950d-4f91-b78f-d4469a197742","Type":"ContainerStarted","Data":"25170940a796171a5fcab55c679cef637333e8aa4a674075871b3577abefe1e1"} Jan 31 16:31:42 crc kubenswrapper[4769]: I0131 16:31:42.289975 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"fbcf3d0c0d1390d8ec4a5a9d4591e577c227c642a6f1f3c24b11f59a42ae10b2"} Jan 31 16:31:42 crc kubenswrapper[4769]: I0131 16:31:42.290021 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"5cc1c19a241edb363c16c4361cce197f4f175960d4385971bb95076a994a2ea0"} Jan 31 16:31:42 crc kubenswrapper[4769]: I0131 16:31:42.291541 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" event={"ID":"4494904a-b7f5-4141-8a63-3360e03bc528","Type":"ContainerStarted","Data":"642b2e5039e3689f75c97a49c50027ac9c8193290a92ae15c6ca068eb171384f"} Jan 31 16:31:42 crc kubenswrapper[4769]: I0131 16:31:42.291572 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" event={"ID":"4494904a-b7f5-4141-8a63-3360e03bc528","Type":"ContainerStarted","Data":"4a6e46ae6cd8c1772a959e2053add752cf78bcaad102acfcb8b9e8384eba523e"} Jan 31 16:31:42 crc kubenswrapper[4769]: I0131 16:31:42.359328 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" podStartSLOduration=128.359311222 podStartE2EDuration="2m8.359311222s" podCreationTimestamp="2026-01-31 16:29:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:31:42.359136228 +0000 UTC m=+150.433304887" watchObservedRunningTime="2026-01-31 16:31:42.359311222 +0000 UTC m=+150.433479891" Jan 31 16:31:42 crc kubenswrapper[4769]: I0131 16:31:42.719459 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Jan 31 16:31:42 crc kubenswrapper[4769]: I0131 16:31:42.719887 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-zllsb"] Jan 31 16:31:42 crc kubenswrapper[4769]: I0131 16:31:42.720813 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-zllsb"] Jan 31 16:31:42 crc kubenswrapper[4769]: I0131 16:31:42.720930 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zllsb" Jan 31 16:31:42 crc kubenswrapper[4769]: I0131 16:31:42.722990 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 31 16:31:42 crc kubenswrapper[4769]: I0131 16:31:42.855558 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wbghb\" (UniqueName: \"kubernetes.io/projected/63e5ae27-836f-438f-905b-6bb3ffa507ef-kube-api-access-wbghb\") pod \"redhat-marketplace-zllsb\" (UID: \"63e5ae27-836f-438f-905b-6bb3ffa507ef\") " pod="openshift-marketplace/redhat-marketplace-zllsb" Jan 31 16:31:42 crc kubenswrapper[4769]: I0131 16:31:42.855630 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63e5ae27-836f-438f-905b-6bb3ffa507ef-utilities\") pod \"redhat-marketplace-zllsb\" (UID: \"63e5ae27-836f-438f-905b-6bb3ffa507ef\") " pod="openshift-marketplace/redhat-marketplace-zllsb" Jan 31 16:31:42 crc kubenswrapper[4769]: I0131 16:31:42.855681 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63e5ae27-836f-438f-905b-6bb3ffa507ef-catalog-content\") pod \"redhat-marketplace-zllsb\" (UID: \"63e5ae27-836f-438f-905b-6bb3ffa507ef\") " pod="openshift-marketplace/redhat-marketplace-zllsb" Jan 31 16:31:42 crc kubenswrapper[4769]: I0131 16:31:42.956835 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63e5ae27-836f-438f-905b-6bb3ffa507ef-catalog-content\") pod \"redhat-marketplace-zllsb\" (UID: \"63e5ae27-836f-438f-905b-6bb3ffa507ef\") " pod="openshift-marketplace/redhat-marketplace-zllsb" Jan 31 16:31:42 crc kubenswrapper[4769]: I0131 16:31:42.957090 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wbghb\" (UniqueName: \"kubernetes.io/projected/63e5ae27-836f-438f-905b-6bb3ffa507ef-kube-api-access-wbghb\") pod \"redhat-marketplace-zllsb\" (UID: \"63e5ae27-836f-438f-905b-6bb3ffa507ef\") " pod="openshift-marketplace/redhat-marketplace-zllsb" Jan 31 16:31:42 crc kubenswrapper[4769]: I0131 16:31:42.957322 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63e5ae27-836f-438f-905b-6bb3ffa507ef-utilities\") pod \"redhat-marketplace-zllsb\" (UID: \"63e5ae27-836f-438f-905b-6bb3ffa507ef\") " pod="openshift-marketplace/redhat-marketplace-zllsb" Jan 31 16:31:42 crc kubenswrapper[4769]: I0131 16:31:42.957548 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63e5ae27-836f-438f-905b-6bb3ffa507ef-catalog-content\") pod \"redhat-marketplace-zllsb\" (UID: \"63e5ae27-836f-438f-905b-6bb3ffa507ef\") " pod="openshift-marketplace/redhat-marketplace-zllsb" Jan 31 16:31:42 crc kubenswrapper[4769]: I0131 16:31:42.957811 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63e5ae27-836f-438f-905b-6bb3ffa507ef-utilities\") pod \"redhat-marketplace-zllsb\" (UID: \"63e5ae27-836f-438f-905b-6bb3ffa507ef\") " pod="openshift-marketplace/redhat-marketplace-zllsb" Jan 31 16:31:42 crc kubenswrapper[4769]: I0131 16:31:42.983277 4769 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-wbghb\" (UniqueName: \"kubernetes.io/projected/63e5ae27-836f-438f-905b-6bb3ffa507ef-kube-api-access-wbghb\") pod \"redhat-marketplace-zllsb\" (UID: \"63e5ae27-836f-438f-905b-6bb3ffa507ef\") " pod="openshift-marketplace/redhat-marketplace-zllsb" Jan 31 16:31:43 crc kubenswrapper[4769]: I0131 16:31:43.035791 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zllsb" Jan 31 16:31:43 crc kubenswrapper[4769]: I0131 16:31:43.119196 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-6n8rw"] Jan 31 16:31:43 crc kubenswrapper[4769]: I0131 16:31:43.120152 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6n8rw" Jan 31 16:31:43 crc kubenswrapper[4769]: I0131 16:31:43.125568 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6n8rw"] Jan 31 16:31:43 crc kubenswrapper[4769]: I0131 16:31:43.261694 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0abe8daf-8786-4b00-b488-d96f19efe5ba-catalog-content\") pod \"redhat-marketplace-6n8rw\" (UID: \"0abe8daf-8786-4b00-b488-d96f19efe5ba\") " pod="openshift-marketplace/redhat-marketplace-6n8rw" Jan 31 16:31:43 crc kubenswrapper[4769]: I0131 16:31:43.261754 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0abe8daf-8786-4b00-b488-d96f19efe5ba-utilities\") pod \"redhat-marketplace-6n8rw\" (UID: \"0abe8daf-8786-4b00-b488-d96f19efe5ba\") " pod="openshift-marketplace/redhat-marketplace-6n8rw" Jan 31 16:31:43 crc kubenswrapper[4769]: I0131 16:31:43.261802 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kcxv2\" (UniqueName: \"kubernetes.io/projected/0abe8daf-8786-4b00-b488-d96f19efe5ba-kube-api-access-kcxv2\") pod \"redhat-marketplace-6n8rw\" (UID: \"0abe8daf-8786-4b00-b488-d96f19efe5ba\") " pod="openshift-marketplace/redhat-marketplace-6n8rw" Jan 31 16:31:43 crc kubenswrapper[4769]: I0131 16:31:43.266722 4769 patch_prober.go:28] interesting pod/router-default-5444994796-4lpmw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 31 16:31:43 crc kubenswrapper[4769]: [-]has-synced failed: reason withheld Jan 31 16:31:43 crc kubenswrapper[4769]: [+]process-running ok Jan 31 16:31:43 crc kubenswrapper[4769]: healthz check failed Jan 31 16:31:43 crc kubenswrapper[4769]: I0131 16:31:43.266763 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4lpmw" podUID="b04b6715-da98-41a2-a034-3ee53201f83d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 31 16:31:43 crc kubenswrapper[4769]: I0131 16:31:43.299395 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:31:43 crc kubenswrapper[4769]: I0131 16:31:43.363881 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kcxv2\" (UniqueName: 
\"kubernetes.io/projected/0abe8daf-8786-4b00-b488-d96f19efe5ba-kube-api-access-kcxv2\") pod \"redhat-marketplace-6n8rw\" (UID: \"0abe8daf-8786-4b00-b488-d96f19efe5ba\") " pod="openshift-marketplace/redhat-marketplace-6n8rw" Jan 31 16:31:43 crc kubenswrapper[4769]: I0131 16:31:43.364359 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0abe8daf-8786-4b00-b488-d96f19efe5ba-catalog-content\") pod \"redhat-marketplace-6n8rw\" (UID: \"0abe8daf-8786-4b00-b488-d96f19efe5ba\") " pod="openshift-marketplace/redhat-marketplace-6n8rw" Jan 31 16:31:43 crc kubenswrapper[4769]: I0131 16:31:43.364428 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0abe8daf-8786-4b00-b488-d96f19efe5ba-utilities\") pod \"redhat-marketplace-6n8rw\" (UID: \"0abe8daf-8786-4b00-b488-d96f19efe5ba\") " pod="openshift-marketplace/redhat-marketplace-6n8rw" Jan 31 16:31:43 crc kubenswrapper[4769]: I0131 16:31:43.365161 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0abe8daf-8786-4b00-b488-d96f19efe5ba-utilities\") pod \"redhat-marketplace-6n8rw\" (UID: \"0abe8daf-8786-4b00-b488-d96f19efe5ba\") " pod="openshift-marketplace/redhat-marketplace-6n8rw" Jan 31 16:31:43 crc kubenswrapper[4769]: I0131 16:31:43.367832 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0abe8daf-8786-4b00-b488-d96f19efe5ba-catalog-content\") pod \"redhat-marketplace-6n8rw\" (UID: \"0abe8daf-8786-4b00-b488-d96f19efe5ba\") " pod="openshift-marketplace/redhat-marketplace-6n8rw" Jan 31 16:31:43 crc kubenswrapper[4769]: I0131 16:31:43.377669 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-pwzwp" Jan 31 16:31:43 crc kubenswrapper[4769]: I0131 16:31:43.377929 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-pwzwp" Jan 31 16:31:43 crc kubenswrapper[4769]: I0131 16:31:43.392658 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-pwzwp" Jan 31 16:31:43 crc kubenswrapper[4769]: I0131 16:31:43.393828 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kcxv2\" (UniqueName: \"kubernetes.io/projected/0abe8daf-8786-4b00-b488-d96f19efe5ba-kube-api-access-kcxv2\") pod \"redhat-marketplace-6n8rw\" (UID: \"0abe8daf-8786-4b00-b488-d96f19efe5ba\") " pod="openshift-marketplace/redhat-marketplace-6n8rw" Jan 31 16:31:43 crc kubenswrapper[4769]: I0131 16:31:43.449202 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6n8rw" Jan 31 16:31:43 crc kubenswrapper[4769]: I0131 16:31:43.522987 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-zllsb"] Jan 31 16:31:43 crc kubenswrapper[4769]: I0131 16:31:43.909368 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-ds894" Jan 31 16:31:43 crc kubenswrapper[4769]: I0131 16:31:43.909766 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-ds894" Jan 31 16:31:43 crc kubenswrapper[4769]: I0131 16:31:43.910261 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-jtrpz"] Jan 31 16:31:43 crc kubenswrapper[4769]: I0131 16:31:43.911535 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jtrpz" Jan 31 16:31:43 crc kubenswrapper[4769]: I0131 16:31:43.917266 4769 patch_prober.go:28] interesting pod/console-f9d7485db-ds894 container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.14:8443/health\": dial tcp 10.217.0.14:8443: connect: connection refused" start-of-body= Jan 31 16:31:43 crc kubenswrapper[4769]: I0131 16:31:43.917318 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-ds894" podUID="2e845173-60d6-4bbb-a479-e752d55a4d7d" containerName="console" probeResult="failure" output="Get \"https://10.217.0.14:8443/health\": dial tcp 10.217.0.14:8443: connect: connection refused" Jan 31 16:31:43 crc kubenswrapper[4769]: I0131 16:31:43.919838 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 31 16:31:43 crc kubenswrapper[4769]: I0131 16:31:43.936384 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-jtrpz"] Jan 31 16:31:43 crc kubenswrapper[4769]: I0131 16:31:43.973146 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6n8rw"] Jan 31 16:31:43 crc kubenswrapper[4769]: I0131 16:31:43.984815 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tsj2r\" (UniqueName: \"kubernetes.io/projected/62c63967-4fa2-4a8c-a186-a58dd20f6228-kube-api-access-tsj2r\") pod \"redhat-operators-jtrpz\" (UID: \"62c63967-4fa2-4a8c-a186-a58dd20f6228\") " pod="openshift-marketplace/redhat-operators-jtrpz" Jan 31 16:31:43 crc kubenswrapper[4769]: I0131 16:31:43.984861 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/62c63967-4fa2-4a8c-a186-a58dd20f6228-utilities\") pod \"redhat-operators-jtrpz\" (UID: \"62c63967-4fa2-4a8c-a186-a58dd20f6228\") " pod="openshift-marketplace/redhat-operators-jtrpz" Jan 31 16:31:43 crc kubenswrapper[4769]: I0131 16:31:43.984904 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/62c63967-4fa2-4a8c-a186-a58dd20f6228-catalog-content\") pod \"redhat-operators-jtrpz\" (UID: \"62c63967-4fa2-4a8c-a186-a58dd20f6228\") " pod="openshift-marketplace/redhat-operators-jtrpz" Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.086156 4769 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/62c63967-4fa2-4a8c-a186-a58dd20f6228-catalog-content\") pod \"redhat-operators-jtrpz\" (UID: \"62c63967-4fa2-4a8c-a186-a58dd20f6228\") " pod="openshift-marketplace/redhat-operators-jtrpz" Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.086249 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tsj2r\" (UniqueName: \"kubernetes.io/projected/62c63967-4fa2-4a8c-a186-a58dd20f6228-kube-api-access-tsj2r\") pod \"redhat-operators-jtrpz\" (UID: \"62c63967-4fa2-4a8c-a186-a58dd20f6228\") " pod="openshift-marketplace/redhat-operators-jtrpz" Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.086275 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/62c63967-4fa2-4a8c-a186-a58dd20f6228-utilities\") pod \"redhat-operators-jtrpz\" (UID: \"62c63967-4fa2-4a8c-a186-a58dd20f6228\") " pod="openshift-marketplace/redhat-operators-jtrpz" Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.086686 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/62c63967-4fa2-4a8c-a186-a58dd20f6228-utilities\") pod \"redhat-operators-jtrpz\" (UID: \"62c63967-4fa2-4a8c-a186-a58dd20f6228\") " pod="openshift-marketplace/redhat-operators-jtrpz" Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.086846 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/62c63967-4fa2-4a8c-a186-a58dd20f6228-catalog-content\") pod \"redhat-operators-jtrpz\" (UID: \"62c63967-4fa2-4a8c-a186-a58dd20f6228\") " pod="openshift-marketplace/redhat-operators-jtrpz" Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.109225 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tsj2r\" (UniqueName: \"kubernetes.io/projected/62c63967-4fa2-4a8c-a186-a58dd20f6228-kube-api-access-tsj2r\") pod \"redhat-operators-jtrpz\" (UID: \"62c63967-4fa2-4a8c-a186-a58dd20f6228\") " pod="openshift-marketplace/redhat-operators-jtrpz" Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.256147 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jtrpz" Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.261782 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.262370 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.264023 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-4lpmw" Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.266090 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.266463 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.270958 4769 patch_prober.go:28] interesting pod/router-default-5444994796-4lpmw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 31 16:31:44 crc kubenswrapper[4769]: [-]has-synced failed: reason withheld Jan 31 16:31:44 crc kubenswrapper[4769]: [+]process-running ok Jan 31 16:31:44 crc kubenswrapper[4769]: healthz check failed Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.271006 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4lpmw" podUID="b04b6715-da98-41a2-a034-3ee53201f83d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.277229 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.316400 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-gzw85"] Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.317597 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-gzw85" Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.325190 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gzw85"] Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.339248 4769 generic.go:334] "Generic (PLEG): container finished" podID="0abe8daf-8786-4b00-b488-d96f19efe5ba" containerID="a07090e65ebb40e4e1bcf9d74d7480072edafd299f75528ff24fb9777b1c1e12" exitCode=0 Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.339327 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6n8rw" event={"ID":"0abe8daf-8786-4b00-b488-d96f19efe5ba","Type":"ContainerDied","Data":"a07090e65ebb40e4e1bcf9d74d7480072edafd299f75528ff24fb9777b1c1e12"} Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.339352 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6n8rw" event={"ID":"0abe8daf-8786-4b00-b488-d96f19efe5ba","Type":"ContainerStarted","Data":"8be2badc84685c8747c472c72d3ac16b26093a2a3e135c8fee77a4d98490057b"} Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.344521 4769 generic.go:334] "Generic (PLEG): container finished" podID="63e5ae27-836f-438f-905b-6bb3ffa507ef" containerID="988a84bc08815ba425456e1c9fc634d886a26c5ccacc0aa6417058a1ad0230a7" exitCode=0 Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.344573 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zllsb" event={"ID":"63e5ae27-836f-438f-905b-6bb3ffa507ef","Type":"ContainerDied","Data":"988a84bc08815ba425456e1c9fc634d886a26c5ccacc0aa6417058a1ad0230a7"} Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.344598 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zllsb" event={"ID":"63e5ae27-836f-438f-905b-6bb3ffa507ef","Type":"ContainerStarted","Data":"52b318f6c90ee36fa59e0713641dcaf8dd9831b24397b003c589632902f1bca2"} Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.348444 4769 generic.go:334] "Generic (PLEG): container finished" podID="aecc20b3-16e2-4d56-93ec-2c62b4a45e56" containerID="76943742668c5c20bee77cf85f46bc951d40d6b1d5ecd9c3720b69e068175428" exitCode=0 Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.348655 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29497950-gf6hx" event={"ID":"aecc20b3-16e2-4d56-93ec-2c62b4a45e56","Type":"ContainerDied","Data":"76943742668c5c20bee77cf85f46bc951d40d6b1d5ecd9c3720b69e068175428"} Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.360269 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-pwzwp" Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.390447 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ca494a3d-c19c-4367-83e4-8a5cafbce7b4-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"ca494a3d-c19c-4367-83e4-8a5cafbce7b4\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.390557 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tv7tp\" (UniqueName: \"kubernetes.io/projected/867ca138-e999-4977-a9f7-f9da159c3515-kube-api-access-tv7tp\") pod 
\"redhat-operators-gzw85\" (UID: \"867ca138-e999-4977-a9f7-f9da159c3515\") " pod="openshift-marketplace/redhat-operators-gzw85" Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.390621 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/867ca138-e999-4977-a9f7-f9da159c3515-utilities\") pod \"redhat-operators-gzw85\" (UID: \"867ca138-e999-4977-a9f7-f9da159c3515\") " pod="openshift-marketplace/redhat-operators-gzw85" Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.390689 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/867ca138-e999-4977-a9f7-f9da159c3515-catalog-content\") pod \"redhat-operators-gzw85\" (UID: \"867ca138-e999-4977-a9f7-f9da159c3515\") " pod="openshift-marketplace/redhat-operators-gzw85" Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.390766 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ca494a3d-c19c-4367-83e4-8a5cafbce7b4-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"ca494a3d-c19c-4367-83e4-8a5cafbce7b4\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.476175 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-59jw4" Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.491624 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/867ca138-e999-4977-a9f7-f9da159c3515-catalog-content\") pod \"redhat-operators-gzw85\" (UID: \"867ca138-e999-4977-a9f7-f9da159c3515\") " pod="openshift-marketplace/redhat-operators-gzw85" Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.491691 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ca494a3d-c19c-4367-83e4-8a5cafbce7b4-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"ca494a3d-c19c-4367-83e4-8a5cafbce7b4\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.491721 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ca494a3d-c19c-4367-83e4-8a5cafbce7b4-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"ca494a3d-c19c-4367-83e4-8a5cafbce7b4\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.491759 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tv7tp\" (UniqueName: \"kubernetes.io/projected/867ca138-e999-4977-a9f7-f9da159c3515-kube-api-access-tv7tp\") pod \"redhat-operators-gzw85\" (UID: \"867ca138-e999-4977-a9f7-f9da159c3515\") " pod="openshift-marketplace/redhat-operators-gzw85" Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.491780 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ca494a3d-c19c-4367-83e4-8a5cafbce7b4-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"ca494a3d-c19c-4367-83e4-8a5cafbce7b4\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 31 16:31:44 crc 
kubenswrapper[4769]: I0131 16:31:44.491790 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/867ca138-e999-4977-a9f7-f9da159c3515-utilities\") pod \"redhat-operators-gzw85\" (UID: \"867ca138-e999-4977-a9f7-f9da159c3515\") " pod="openshift-marketplace/redhat-operators-gzw85" Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.492270 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/867ca138-e999-4977-a9f7-f9da159c3515-utilities\") pod \"redhat-operators-gzw85\" (UID: \"867ca138-e999-4977-a9f7-f9da159c3515\") " pod="openshift-marketplace/redhat-operators-gzw85" Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.492272 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/867ca138-e999-4977-a9f7-f9da159c3515-catalog-content\") pod \"redhat-operators-gzw85\" (UID: \"867ca138-e999-4977-a9f7-f9da159c3515\") " pod="openshift-marketplace/redhat-operators-gzw85" Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.514820 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tv7tp\" (UniqueName: \"kubernetes.io/projected/867ca138-e999-4977-a9f7-f9da159c3515-kube-api-access-tv7tp\") pod \"redhat-operators-gzw85\" (UID: \"867ca138-e999-4977-a9f7-f9da159c3515\") " pod="openshift-marketplace/redhat-operators-gzw85" Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.523083 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ca494a3d-c19c-4367-83e4-8a5cafbce7b4-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"ca494a3d-c19c-4367-83e4-8a5cafbce7b4\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.604095 4769 patch_prober.go:28] interesting pod/downloads-7954f5f757-g2568 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.18:8080/\": dial tcp 10.217.0.18:8080: connect: connection refused" start-of-body= Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.604146 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-g2568" podUID="c2af561a-0833-4624-a950-1e1bfb2bacaa" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.18:8080/\": dial tcp 10.217.0.18:8080: connect: connection refused" Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.604444 4769 patch_prober.go:28] interesting pod/downloads-7954f5f757-g2568 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.18:8080/\": dial tcp 10.217.0.18:8080: connect: connection refused" start-of-body= Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.604467 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-g2568" podUID="c2af561a-0833-4624-a950-1e1bfb2bacaa" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.18:8080/\": dial tcp 10.217.0.18:8080: connect: connection refused" Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.607065 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.639897 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gzw85" Jan 31 16:31:44 crc kubenswrapper[4769]: I0131 16:31:44.830180 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-jtrpz"] Jan 31 16:31:45 crc kubenswrapper[4769]: I0131 16:31:45.012132 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gzw85"] Jan 31 16:31:45 crc kubenswrapper[4769]: W0131 16:31:45.048113 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod867ca138_e999_4977_a9f7_f9da159c3515.slice/crio-0393722840dcb84d1e63fda89cd197930b0743b8d0b13f204bba5675c0d1cd1c WatchSource:0}: Error finding container 0393722840dcb84d1e63fda89cd197930b0743b8d0b13f204bba5675c0d1cd1c: Status 404 returned error can't find the container with id 0393722840dcb84d1e63fda89cd197930b0743b8d0b13f204bba5675c0d1cd1c Jan 31 16:31:45 crc kubenswrapper[4769]: I0131 16:31:45.059042 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 31 16:31:45 crc kubenswrapper[4769]: W0131 16:31:45.086448 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-podca494a3d_c19c_4367_83e4_8a5cafbce7b4.slice/crio-f85bc7eac488f0cd952d0605aa7c046d480c5dbd2f8119645d7de3b1f0cc9dfb WatchSource:0}: Error finding container f85bc7eac488f0cd952d0605aa7c046d480c5dbd2f8119645d7de3b1f0cc9dfb: Status 404 returned error can't find the container with id f85bc7eac488f0cd952d0605aa7c046d480c5dbd2f8119645d7de3b1f0cc9dfb Jan 31 16:31:45 crc kubenswrapper[4769]: I0131 16:31:45.270671 4769 patch_prober.go:28] interesting pod/router-default-5444994796-4lpmw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 31 16:31:45 crc kubenswrapper[4769]: [-]has-synced failed: reason withheld Jan 31 16:31:45 crc kubenswrapper[4769]: [+]process-running ok Jan 31 16:31:45 crc kubenswrapper[4769]: healthz check failed Jan 31 16:31:45 crc kubenswrapper[4769]: I0131 16:31:45.270748 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-4lpmw" podUID="b04b6715-da98-41a2-a034-3ee53201f83d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 31 16:31:45 crc kubenswrapper[4769]: I0131 16:31:45.357918 4769 generic.go:334] "Generic (PLEG): container finished" podID="62c63967-4fa2-4a8c-a186-a58dd20f6228" containerID="7480eb8d1802c519bb94c5fc45449127eace4d8aa457d56118ca1c47807b1eba" exitCode=0 Jan 31 16:31:45 crc kubenswrapper[4769]: I0131 16:31:45.357994 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jtrpz" event={"ID":"62c63967-4fa2-4a8c-a186-a58dd20f6228","Type":"ContainerDied","Data":"7480eb8d1802c519bb94c5fc45449127eace4d8aa457d56118ca1c47807b1eba"} Jan 31 16:31:45 crc kubenswrapper[4769]: I0131 16:31:45.358053 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jtrpz" 
event={"ID":"62c63967-4fa2-4a8c-a186-a58dd20f6228","Type":"ContainerStarted","Data":"5d55b959707d5f9346d943e79101198c25bf417f7506cbaf36d784974b3a379f"} Jan 31 16:31:45 crc kubenswrapper[4769]: I0131 16:31:45.361983 4769 generic.go:334] "Generic (PLEG): container finished" podID="867ca138-e999-4977-a9f7-f9da159c3515" containerID="fd3b21ffa26f67b360a04c391fb71ae8c38ecc5f30dd84f4cd723c2201cb2eaa" exitCode=0 Jan 31 16:31:45 crc kubenswrapper[4769]: I0131 16:31:45.362038 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gzw85" event={"ID":"867ca138-e999-4977-a9f7-f9da159c3515","Type":"ContainerDied","Data":"fd3b21ffa26f67b360a04c391fb71ae8c38ecc5f30dd84f4cd723c2201cb2eaa"} Jan 31 16:31:45 crc kubenswrapper[4769]: I0131 16:31:45.362061 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gzw85" event={"ID":"867ca138-e999-4977-a9f7-f9da159c3515","Type":"ContainerStarted","Data":"0393722840dcb84d1e63fda89cd197930b0743b8d0b13f204bba5675c0d1cd1c"} Jan 31 16:31:45 crc kubenswrapper[4769]: I0131 16:31:45.364877 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"ca494a3d-c19c-4367-83e4-8a5cafbce7b4","Type":"ContainerStarted","Data":"f85bc7eac488f0cd952d0605aa7c046d480c5dbd2f8119645d7de3b1f0cc9dfb"} Jan 31 16:31:45 crc kubenswrapper[4769]: I0131 16:31:45.663559 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29497950-gf6hx" Jan 31 16:31:45 crc kubenswrapper[4769]: I0131 16:31:45.714625 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z6n92\" (UniqueName: \"kubernetes.io/projected/aecc20b3-16e2-4d56-93ec-2c62b4a45e56-kube-api-access-z6n92\") pod \"aecc20b3-16e2-4d56-93ec-2c62b4a45e56\" (UID: \"aecc20b3-16e2-4d56-93ec-2c62b4a45e56\") " Jan 31 16:31:45 crc kubenswrapper[4769]: I0131 16:31:45.714678 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/aecc20b3-16e2-4d56-93ec-2c62b4a45e56-config-volume\") pod \"aecc20b3-16e2-4d56-93ec-2c62b4a45e56\" (UID: \"aecc20b3-16e2-4d56-93ec-2c62b4a45e56\") " Jan 31 16:31:45 crc kubenswrapper[4769]: I0131 16:31:45.714712 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/aecc20b3-16e2-4d56-93ec-2c62b4a45e56-secret-volume\") pod \"aecc20b3-16e2-4d56-93ec-2c62b4a45e56\" (UID: \"aecc20b3-16e2-4d56-93ec-2c62b4a45e56\") " Jan 31 16:31:45 crc kubenswrapper[4769]: I0131 16:31:45.715442 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aecc20b3-16e2-4d56-93ec-2c62b4a45e56-config-volume" (OuterVolumeSpecName: "config-volume") pod "aecc20b3-16e2-4d56-93ec-2c62b4a45e56" (UID: "aecc20b3-16e2-4d56-93ec-2c62b4a45e56"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:31:45 crc kubenswrapper[4769]: I0131 16:31:45.721333 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aecc20b3-16e2-4d56-93ec-2c62b4a45e56-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "aecc20b3-16e2-4d56-93ec-2c62b4a45e56" (UID: "aecc20b3-16e2-4d56-93ec-2c62b4a45e56"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:31:45 crc kubenswrapper[4769]: I0131 16:31:45.721470 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aecc20b3-16e2-4d56-93ec-2c62b4a45e56-kube-api-access-z6n92" (OuterVolumeSpecName: "kube-api-access-z6n92") pod "aecc20b3-16e2-4d56-93ec-2c62b4a45e56" (UID: "aecc20b3-16e2-4d56-93ec-2c62b4a45e56"). InnerVolumeSpecName "kube-api-access-z6n92". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:31:45 crc kubenswrapper[4769]: I0131 16:31:45.816582 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z6n92\" (UniqueName: \"kubernetes.io/projected/aecc20b3-16e2-4d56-93ec-2c62b4a45e56-kube-api-access-z6n92\") on node \"crc\" DevicePath \"\"" Jan 31 16:31:45 crc kubenswrapper[4769]: I0131 16:31:45.816615 4769 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/aecc20b3-16e2-4d56-93ec-2c62b4a45e56-config-volume\") on node \"crc\" DevicePath \"\"" Jan 31 16:31:45 crc kubenswrapper[4769]: I0131 16:31:45.816630 4769 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/aecc20b3-16e2-4d56-93ec-2c62b4a45e56-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 31 16:31:46 crc kubenswrapper[4769]: I0131 16:31:46.271287 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-4lpmw" Jan 31 16:31:46 crc kubenswrapper[4769]: I0131 16:31:46.274694 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-4lpmw" Jan 31 16:31:46 crc kubenswrapper[4769]: I0131 16:31:46.386409 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29497950-gf6hx" event={"ID":"aecc20b3-16e2-4d56-93ec-2c62b4a45e56","Type":"ContainerDied","Data":"f5ba5eda851bf4ef32c3aeebe4a99bcbfd8f8000d4d440bb014f9af0657ce902"} Jan 31 16:31:46 crc kubenswrapper[4769]: I0131 16:31:46.386454 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f5ba5eda851bf4ef32c3aeebe4a99bcbfd8f8000d4d440bb014f9af0657ce902" Jan 31 16:31:46 crc kubenswrapper[4769]: I0131 16:31:46.386450 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29497950-gf6hx" Jan 31 16:31:46 crc kubenswrapper[4769]: I0131 16:31:46.389292 4769 generic.go:334] "Generic (PLEG): container finished" podID="ca494a3d-c19c-4367-83e4-8a5cafbce7b4" containerID="c16ab1e86224c7aecd231a9c1e81443e67dacb0aef3b41d8942e4c7638d2b856" exitCode=0 Jan 31 16:31:46 crc kubenswrapper[4769]: I0131 16:31:46.389380 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"ca494a3d-c19c-4367-83e4-8a5cafbce7b4","Type":"ContainerDied","Data":"c16ab1e86224c7aecd231a9c1e81443e67dacb0aef3b41d8942e4c7638d2b856"} Jan 31 16:31:46 crc kubenswrapper[4769]: I0131 16:31:46.488923 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-x87ps" Jan 31 16:31:47 crc kubenswrapper[4769]: I0131 16:31:47.704792 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 31 16:31:47 crc kubenswrapper[4769]: I0131 16:31:47.747775 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ca494a3d-c19c-4367-83e4-8a5cafbce7b4-kube-api-access\") pod \"ca494a3d-c19c-4367-83e4-8a5cafbce7b4\" (UID: \"ca494a3d-c19c-4367-83e4-8a5cafbce7b4\") " Jan 31 16:31:47 crc kubenswrapper[4769]: I0131 16:31:47.747876 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ca494a3d-c19c-4367-83e4-8a5cafbce7b4-kubelet-dir\") pod \"ca494a3d-c19c-4367-83e4-8a5cafbce7b4\" (UID: \"ca494a3d-c19c-4367-83e4-8a5cafbce7b4\") " Jan 31 16:31:47 crc kubenswrapper[4769]: I0131 16:31:47.748136 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ca494a3d-c19c-4367-83e4-8a5cafbce7b4-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "ca494a3d-c19c-4367-83e4-8a5cafbce7b4" (UID: "ca494a3d-c19c-4367-83e4-8a5cafbce7b4"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 31 16:31:47 crc kubenswrapper[4769]: I0131 16:31:47.753710 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ca494a3d-c19c-4367-83e4-8a5cafbce7b4-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "ca494a3d-c19c-4367-83e4-8a5cafbce7b4" (UID: "ca494a3d-c19c-4367-83e4-8a5cafbce7b4"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:31:47 crc kubenswrapper[4769]: I0131 16:31:47.850424 4769 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ca494a3d-c19c-4367-83e4-8a5cafbce7b4-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 31 16:31:47 crc kubenswrapper[4769]: I0131 16:31:47.850459 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ca494a3d-c19c-4367-83e4-8a5cafbce7b4-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 31 16:31:48 crc kubenswrapper[4769]: I0131 16:31:48.413332 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"ca494a3d-c19c-4367-83e4-8a5cafbce7b4","Type":"ContainerDied","Data":"f85bc7eac488f0cd952d0605aa7c046d480c5dbd2f8119645d7de3b1f0cc9dfb"} Jan 31 16:31:48 crc kubenswrapper[4769]: I0131 16:31:48.413377 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f85bc7eac488f0cd952d0605aa7c046d480c5dbd2f8119645d7de3b1f0cc9dfb" Jan 31 16:31:48 crc kubenswrapper[4769]: I0131 16:31:48.413425 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 31 16:31:48 crc kubenswrapper[4769]: I0131 16:31:48.587825 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Jan 31 16:31:48 crc kubenswrapper[4769]: E0131 16:31:48.588216 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca494a3d-c19c-4367-83e4-8a5cafbce7b4" containerName="pruner" Jan 31 16:31:48 crc kubenswrapper[4769]: I0131 16:31:48.588237 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca494a3d-c19c-4367-83e4-8a5cafbce7b4" containerName="pruner" Jan 31 16:31:48 crc kubenswrapper[4769]: E0131 16:31:48.588252 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aecc20b3-16e2-4d56-93ec-2c62b4a45e56" containerName="collect-profiles" Jan 31 16:31:48 crc kubenswrapper[4769]: I0131 16:31:48.588260 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="aecc20b3-16e2-4d56-93ec-2c62b4a45e56" containerName="collect-profiles" Jan 31 16:31:48 crc kubenswrapper[4769]: I0131 16:31:48.588370 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="ca494a3d-c19c-4367-83e4-8a5cafbce7b4" containerName="pruner" Jan 31 16:31:48 crc kubenswrapper[4769]: I0131 16:31:48.588388 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="aecc20b3-16e2-4d56-93ec-2c62b4a45e56" containerName="collect-profiles" Jan 31 16:31:48 crc kubenswrapper[4769]: I0131 16:31:48.588960 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 31 16:31:48 crc kubenswrapper[4769]: I0131 16:31:48.591145 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Jan 31 16:31:48 crc kubenswrapper[4769]: I0131 16:31:48.591733 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Jan 31 16:31:48 crc kubenswrapper[4769]: I0131 16:31:48.591733 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Jan 31 16:31:48 crc kubenswrapper[4769]: I0131 16:31:48.664073 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/58394703-65b3-4afc-bdb7-d0b07efb7b9d-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"58394703-65b3-4afc-bdb7-d0b07efb7b9d\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 31 16:31:48 crc kubenswrapper[4769]: I0131 16:31:48.664327 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/58394703-65b3-4afc-bdb7-d0b07efb7b9d-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"58394703-65b3-4afc-bdb7-d0b07efb7b9d\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 31 16:31:48 crc kubenswrapper[4769]: I0131 16:31:48.765107 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/58394703-65b3-4afc-bdb7-d0b07efb7b9d-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"58394703-65b3-4afc-bdb7-d0b07efb7b9d\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 31 16:31:48 crc kubenswrapper[4769]: I0131 16:31:48.765211 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: 
\"kubernetes.io/projected/58394703-65b3-4afc-bdb7-d0b07efb7b9d-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"58394703-65b3-4afc-bdb7-d0b07efb7b9d\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 31 16:31:48 crc kubenswrapper[4769]: I0131 16:31:48.767415 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/58394703-65b3-4afc-bdb7-d0b07efb7b9d-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"58394703-65b3-4afc-bdb7-d0b07efb7b9d\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 31 16:31:48 crc kubenswrapper[4769]: I0131 16:31:48.787836 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/58394703-65b3-4afc-bdb7-d0b07efb7b9d-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"58394703-65b3-4afc-bdb7-d0b07efb7b9d\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 31 16:31:48 crc kubenswrapper[4769]: I0131 16:31:48.957472 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 31 16:31:50 crc kubenswrapper[4769]: I0131 16:31:50.684858 4769 patch_prober.go:28] interesting pod/machine-config-daemon-4bqbm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 31 16:31:50 crc kubenswrapper[4769]: I0131 16:31:50.685733 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 31 16:31:53 crc kubenswrapper[4769]: I0131 16:31:53.914017 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-ds894" Jan 31 16:31:53 crc kubenswrapper[4769]: I0131 16:31:53.918110 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-ds894" Jan 31 16:31:54 crc kubenswrapper[4769]: I0131 16:31:54.623335 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-g2568" Jan 31 16:31:55 crc kubenswrapper[4769]: I0131 16:31:55.606916 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/428b0729-22d7-4feb-a392-1ec77e5acbc0-metrics-certs\") pod \"network-metrics-daemon-bl9cd\" (UID: \"428b0729-22d7-4feb-a392-1ec77e5acbc0\") " pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:31:55 crc kubenswrapper[4769]: I0131 16:31:55.614361 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/428b0729-22d7-4feb-a392-1ec77e5acbc0-metrics-certs\") pod \"network-metrics-daemon-bl9cd\" (UID: \"428b0729-22d7-4feb-a392-1ec77e5acbc0\") " pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:31:55 crc kubenswrapper[4769]: I0131 16:31:55.831044 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-bl9cd" Jan 31 16:31:59 crc kubenswrapper[4769]: I0131 16:31:59.081836 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Jan 31 16:31:59 crc kubenswrapper[4769]: I0131 16:31:59.183535 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-bl9cd"] Jan 31 16:31:59 crc kubenswrapper[4769]: W0131 16:31:59.189955 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod428b0729_22d7_4feb_a392_1ec77e5acbc0.slice/crio-8d7166328e30eb07765458d58948694d7086764cd65b2810d0deaa238202fdf3 WatchSource:0}: Error finding container 8d7166328e30eb07765458d58948694d7086764cd65b2810d0deaa238202fdf3: Status 404 returned error can't find the container with id 8d7166328e30eb07765458d58948694d7086764cd65b2810d0deaa238202fdf3 Jan 31 16:31:59 crc kubenswrapper[4769]: I0131 16:31:59.510013 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"58394703-65b3-4afc-bdb7-d0b07efb7b9d","Type":"ContainerStarted","Data":"f8f458e1cf7afa893e15c2bdd0d9d66b3f40907e0c2af3f84f7a1cbd61073ca4"} Jan 31 16:31:59 crc kubenswrapper[4769]: I0131 16:31:59.510726 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-bl9cd" event={"ID":"428b0729-22d7-4feb-a392-1ec77e5acbc0","Type":"ContainerStarted","Data":"8d7166328e30eb07765458d58948694d7086764cd65b2810d0deaa238202fdf3"} Jan 31 16:32:01 crc kubenswrapper[4769]: I0131 16:32:01.422310 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:32:02 crc kubenswrapper[4769]: I0131 16:32:02.530396 4769 generic.go:334] "Generic (PLEG): container finished" podID="58394703-65b3-4afc-bdb7-d0b07efb7b9d" containerID="48e41cde9eb53618af604c4be3219d8fad006f144bf7acd836d5ca5beee9e465" exitCode=0 Jan 31 16:32:02 crc kubenswrapper[4769]: I0131 16:32:02.530476 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"58394703-65b3-4afc-bdb7-d0b07efb7b9d","Type":"ContainerDied","Data":"48e41cde9eb53618af604c4be3219d8fad006f144bf7acd836d5ca5beee9e465"} Jan 31 16:32:14 crc kubenswrapper[4769]: I0131 16:32:14.719683 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-c5wrc" Jan 31 16:32:15 crc kubenswrapper[4769]: I0131 16:32:15.880846 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 31 16:32:15 crc kubenswrapper[4769]: E0131 16:32:15.942840 4769 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Jan 31 16:32:15 crc kubenswrapper[4769]: E0131 16:32:15.943271 4769 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qmt9n,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-n6pd8_openshift-marketplace(52d66af3-ac8c-4ea4-9994-eeeaa65513cc): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 31 16:32:15 crc kubenswrapper[4769]: E0131 16:32:15.944521 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-n6pd8" podUID="52d66af3-ac8c-4ea4-9994-eeeaa65513cc" Jan 31 16:32:15 crc kubenswrapper[4769]: E0131 16:32:15.947526 4769 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Jan 31 16:32:15 crc kubenswrapper[4769]: E0131 16:32:15.947615 4769 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-zlv5m,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-hvfw2_openshift-marketplace(58ef25f8-8447-418b-a590-c964242d9336): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 31 16:32:15 crc kubenswrapper[4769]: E0131 16:32:15.948821 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-hvfw2" podUID="58ef25f8-8447-418b-a590-c964242d9336" Jan 31 16:32:15 crc kubenswrapper[4769]: E0131 16:32:15.994840 4769 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Jan 31 16:32:15 crc kubenswrapper[4769]: E0131 16:32:15.995024 4769 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-tv7tp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-gzw85_openshift-marketplace(867ca138-e999-4977-a9f7-f9da159c3515): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 31 16:32:15 crc kubenswrapper[4769]: E0131 16:32:15.996363 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-gzw85" podUID="867ca138-e999-4977-a9f7-f9da159c3515" Jan 31 16:32:15 crc kubenswrapper[4769]: I0131 16:32:15.997895 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/58394703-65b3-4afc-bdb7-d0b07efb7b9d-kube-api-access\") pod \"58394703-65b3-4afc-bdb7-d0b07efb7b9d\" (UID: \"58394703-65b3-4afc-bdb7-d0b07efb7b9d\") " Jan 31 16:32:15 crc kubenswrapper[4769]: I0131 16:32:15.997965 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/58394703-65b3-4afc-bdb7-d0b07efb7b9d-kubelet-dir\") pod \"58394703-65b3-4afc-bdb7-d0b07efb7b9d\" (UID: \"58394703-65b3-4afc-bdb7-d0b07efb7b9d\") " Jan 31 16:32:15 crc kubenswrapper[4769]: I0131 16:32:15.998250 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/58394703-65b3-4afc-bdb7-d0b07efb7b9d-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "58394703-65b3-4afc-bdb7-d0b07efb7b9d" (UID: "58394703-65b3-4afc-bdb7-d0b07efb7b9d"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 31 16:32:16 crc kubenswrapper[4769]: I0131 16:32:16.004279 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/58394703-65b3-4afc-bdb7-d0b07efb7b9d-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "58394703-65b3-4afc-bdb7-d0b07efb7b9d" (UID: "58394703-65b3-4afc-bdb7-d0b07efb7b9d"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:32:16 crc kubenswrapper[4769]: I0131 16:32:16.099155 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/58394703-65b3-4afc-bdb7-d0b07efb7b9d-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 31 16:32:16 crc kubenswrapper[4769]: I0131 16:32:16.099198 4769 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/58394703-65b3-4afc-bdb7-d0b07efb7b9d-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 31 16:32:16 crc kubenswrapper[4769]: I0131 16:32:16.613164 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 31 16:32:16 crc kubenswrapper[4769]: I0131 16:32:16.613326 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"58394703-65b3-4afc-bdb7-d0b07efb7b9d","Type":"ContainerDied","Data":"f8f458e1cf7afa893e15c2bdd0d9d66b3f40907e0c2af3f84f7a1cbd61073ca4"} Jan 31 16:32:16 crc kubenswrapper[4769]: I0131 16:32:16.613850 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f8f458e1cf7afa893e15c2bdd0d9d66b3f40907e0c2af3f84f7a1cbd61073ca4" Jan 31 16:32:16 crc kubenswrapper[4769]: I0131 16:32:16.616906 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-bl9cd" event={"ID":"428b0729-22d7-4feb-a392-1ec77e5acbc0","Type":"ContainerStarted","Data":"16a08d1ae4da867775afe92f7d3b0b538e47241301b68dd7a2e7fb588f70d555"} Jan 31 16:32:17 crc kubenswrapper[4769]: E0131 16:32:17.311344 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-gzw85" podUID="867ca138-e999-4977-a9f7-f9da159c3515" Jan 31 16:32:17 crc kubenswrapper[4769]: E0131 16:32:17.311363 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-hvfw2" podUID="58ef25f8-8447-418b-a590-c964242d9336" Jan 31 16:32:17 crc kubenswrapper[4769]: E0131 16:32:17.311389 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-n6pd8" podUID="52d66af3-ac8c-4ea4-9994-eeeaa65513cc" Jan 31 16:32:17 crc kubenswrapper[4769]: E0131 16:32:17.387800 4769 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Jan 31 16:32:17 crc kubenswrapper[4769]: E0131 16:32:17.387950 4769 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kcxv2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-6n8rw_openshift-marketplace(0abe8daf-8786-4b00-b488-d96f19efe5ba): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 31 16:32:17 crc kubenswrapper[4769]: E0131 16:32:17.389084 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-6n8rw" podUID="0abe8daf-8786-4b00-b488-d96f19efe5ba" Jan 31 16:32:17 crc kubenswrapper[4769]: E0131 16:32:17.513796 4769 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Jan 31 16:32:17 crc kubenswrapper[4769]: E0131 16:32:17.514193 4769 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wbghb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-zllsb_openshift-marketplace(63e5ae27-836f-438f-905b-6bb3ffa507ef): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 31 16:32:17 crc kubenswrapper[4769]: E0131 16:32:17.515358 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-zllsb" podUID="63e5ae27-836f-438f-905b-6bb3ffa507ef" Jan 31 16:32:17 crc kubenswrapper[4769]: E0131 16:32:17.620984 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-6n8rw" podUID="0abe8daf-8786-4b00-b488-d96f19efe5ba" Jan 31 16:32:17 crc kubenswrapper[4769]: E0131 16:32:17.621237 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-zllsb" podUID="63e5ae27-836f-438f-905b-6bb3ffa507ef" Jan 31 16:32:18 crc kubenswrapper[4769]: I0131 16:32:18.627619 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-bl9cd" event={"ID":"428b0729-22d7-4feb-a392-1ec77e5acbc0","Type":"ContainerStarted","Data":"05d33a6411c20322b884e56a16f617eaf16074ae6350741360bf220bda744ac7"} Jan 31 16:32:18 crc kubenswrapper[4769]: I0131 16:32:18.629687 4769 generic.go:334] "Generic (PLEG): container finished" podID="62c63967-4fa2-4a8c-a186-a58dd20f6228" containerID="7f92c48312161019f297597f64ffaf4ab8abe986756d7c0e75e77c8dbfd1a966" exitCode=0 Jan 31 16:32:18 crc kubenswrapper[4769]: I0131 16:32:18.629772 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jtrpz" 
event={"ID":"62c63967-4fa2-4a8c-a186-a58dd20f6228","Type":"ContainerDied","Data":"7f92c48312161019f297597f64ffaf4ab8abe986756d7c0e75e77c8dbfd1a966"} Jan 31 16:32:18 crc kubenswrapper[4769]: I0131 16:32:18.631324 4769 generic.go:334] "Generic (PLEG): container finished" podID="3d149dc3-950d-4f91-b78f-d4469a197742" containerID="c44bfbface029c596b89996142360042a9885b1d84c37eb19b642d87f3824436" exitCode=0 Jan 31 16:32:18 crc kubenswrapper[4769]: I0131 16:32:18.631400 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dkmqk" event={"ID":"3d149dc3-950d-4f91-b78f-d4469a197742","Type":"ContainerDied","Data":"c44bfbface029c596b89996142360042a9885b1d84c37eb19b642d87f3824436"} Jan 31 16:32:18 crc kubenswrapper[4769]: I0131 16:32:18.633340 4769 generic.go:334] "Generic (PLEG): container finished" podID="4e497cac-4dc0-4166-af7d-768713cd0bb8" containerID="b9ff7ef50eaadf11a0c7271b56e557988bbc5590657ecea8b53691ae7998e5a8" exitCode=0 Jan 31 16:32:18 crc kubenswrapper[4769]: I0131 16:32:18.633481 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sn7bc" event={"ID":"4e497cac-4dc0-4166-af7d-768713cd0bb8","Type":"ContainerDied","Data":"b9ff7ef50eaadf11a0c7271b56e557988bbc5590657ecea8b53691ae7998e5a8"} Jan 31 16:32:18 crc kubenswrapper[4769]: I0131 16:32:18.649130 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-bl9cd" podStartSLOduration=165.649109823 podStartE2EDuration="2m45.649109823s" podCreationTimestamp="2026-01-31 16:29:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:32:18.64140823 +0000 UTC m=+186.715576909" watchObservedRunningTime="2026-01-31 16:32:18.649109823 +0000 UTC m=+186.723278492" Jan 31 16:32:19 crc kubenswrapper[4769]: I0131 16:32:19.649411 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sn7bc" event={"ID":"4e497cac-4dc0-4166-af7d-768713cd0bb8","Type":"ContainerStarted","Data":"9dd6ceda6bddc5468b99b4d2b8ba03d7f7278b555e9d16180173158a9b8cd8fe"} Jan 31 16:32:19 crc kubenswrapper[4769]: I0131 16:32:19.657341 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jtrpz" event={"ID":"62c63967-4fa2-4a8c-a186-a58dd20f6228","Type":"ContainerStarted","Data":"772bf34e6a61026bf2601c955e6b208f0cd05de5337e2a7fb7fbd3c3fc186dc2"} Jan 31 16:32:19 crc kubenswrapper[4769]: I0131 16:32:19.661842 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dkmqk" event={"ID":"3d149dc3-950d-4f91-b78f-d4469a197742","Type":"ContainerStarted","Data":"838a0ac9342df925601fa4c103b020a8bf3f6f69234cf66217f2b46af788b5f7"} Jan 31 16:32:19 crc kubenswrapper[4769]: I0131 16:32:19.701082 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-sn7bc" podStartSLOduration=2.892210039 podStartE2EDuration="39.701055273s" podCreationTimestamp="2026-01-31 16:31:40 +0000 UTC" firstStartedPulling="2026-01-31 16:31:42.278050085 +0000 UTC m=+150.352218754" lastFinishedPulling="2026-01-31 16:32:19.086895309 +0000 UTC m=+187.161063988" observedRunningTime="2026-01-31 16:32:19.676621117 +0000 UTC m=+187.750789846" watchObservedRunningTime="2026-01-31 16:32:19.701055273 +0000 UTC m=+187.775223962" Jan 31 16:32:19 crc kubenswrapper[4769]: I0131 16:32:19.703805 4769 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-jtrpz" podStartSLOduration=3.052130242 podStartE2EDuration="36.703794829s" podCreationTimestamp="2026-01-31 16:31:43 +0000 UTC" firstStartedPulling="2026-01-31 16:31:45.363020515 +0000 UTC m=+153.437189174" lastFinishedPulling="2026-01-31 16:32:19.014685082 +0000 UTC m=+187.088853761" observedRunningTime="2026-01-31 16:32:19.696598399 +0000 UTC m=+187.770767148" watchObservedRunningTime="2026-01-31 16:32:19.703794829 +0000 UTC m=+187.777963518" Jan 31 16:32:19 crc kubenswrapper[4769]: I0131 16:32:19.726110 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-dkmqk" podStartSLOduration=1.962869142 podStartE2EDuration="38.726081234s" podCreationTimestamp="2026-01-31 16:31:41 +0000 UTC" firstStartedPulling="2026-01-31 16:31:42.289259245 +0000 UTC m=+150.363427914" lastFinishedPulling="2026-01-31 16:32:19.052471327 +0000 UTC m=+187.126640006" observedRunningTime="2026-01-31 16:32:19.722354872 +0000 UTC m=+187.796523581" watchObservedRunningTime="2026-01-31 16:32:19.726081234 +0000 UTC m=+187.800249943" Jan 31 16:32:20 crc kubenswrapper[4769]: I0131 16:32:20.682257 4769 patch_prober.go:28] interesting pod/machine-config-daemon-4bqbm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 31 16:32:20 crc kubenswrapper[4769]: I0131 16:32:20.682324 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 31 16:32:20 crc kubenswrapper[4769]: I0131 16:32:20.852209 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 31 16:32:21 crc kubenswrapper[4769]: I0131 16:32:21.294707 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-sn7bc" Jan 31 16:32:21 crc kubenswrapper[4769]: I0131 16:32:21.295034 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-sn7bc" Jan 31 16:32:21 crc kubenswrapper[4769]: I0131 16:32:21.437867 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-dkmqk" Jan 31 16:32:21 crc kubenswrapper[4769]: I0131 16:32:21.438099 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-dkmqk" Jan 31 16:32:21 crc kubenswrapper[4769]: I0131 16:32:21.483596 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-dkmqk" Jan 31 16:32:22 crc kubenswrapper[4769]: I0131 16:32:22.421234 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-sn7bc" podUID="4e497cac-4dc0-4166-af7d-768713cd0bb8" containerName="registry-server" probeResult="failure" output=< Jan 31 16:32:22 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Jan 31 16:32:22 crc kubenswrapper[4769]: > Jan 31 16:32:24 crc kubenswrapper[4769]: I0131 
16:32:24.256794 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-jtrpz" Jan 31 16:32:24 crc kubenswrapper[4769]: I0131 16:32:24.257155 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-jtrpz" Jan 31 16:32:25 crc kubenswrapper[4769]: I0131 16:32:25.306320 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-jtrpz" podUID="62c63967-4fa2-4a8c-a186-a58dd20f6228" containerName="registry-server" probeResult="failure" output=< Jan 31 16:32:25 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Jan 31 16:32:25 crc kubenswrapper[4769]: > Jan 31 16:32:25 crc kubenswrapper[4769]: I0131 16:32:25.578797 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 31 16:32:25 crc kubenswrapper[4769]: E0131 16:32:25.578991 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58394703-65b3-4afc-bdb7-d0b07efb7b9d" containerName="pruner" Jan 31 16:32:25 crc kubenswrapper[4769]: I0131 16:32:25.579003 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="58394703-65b3-4afc-bdb7-d0b07efb7b9d" containerName="pruner" Jan 31 16:32:25 crc kubenswrapper[4769]: I0131 16:32:25.579104 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="58394703-65b3-4afc-bdb7-d0b07efb7b9d" containerName="pruner" Jan 31 16:32:25 crc kubenswrapper[4769]: I0131 16:32:25.579447 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 31 16:32:25 crc kubenswrapper[4769]: I0131 16:32:25.582536 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Jan 31 16:32:25 crc kubenswrapper[4769]: I0131 16:32:25.582545 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Jan 31 16:32:25 crc kubenswrapper[4769]: I0131 16:32:25.591042 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 31 16:32:25 crc kubenswrapper[4769]: I0131 16:32:25.614331 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/733b7c3c-229f-4029-b748-dd855b17e079-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"733b7c3c-229f-4029-b748-dd855b17e079\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 31 16:32:25 crc kubenswrapper[4769]: I0131 16:32:25.614426 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/733b7c3c-229f-4029-b748-dd855b17e079-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"733b7c3c-229f-4029-b748-dd855b17e079\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 31 16:32:25 crc kubenswrapper[4769]: I0131 16:32:25.715343 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/733b7c3c-229f-4029-b748-dd855b17e079-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"733b7c3c-229f-4029-b748-dd855b17e079\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 31 16:32:25 crc kubenswrapper[4769]: I0131 16:32:25.715398 4769 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/733b7c3c-229f-4029-b748-dd855b17e079-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"733b7c3c-229f-4029-b748-dd855b17e079\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 31 16:32:25 crc kubenswrapper[4769]: I0131 16:32:25.715522 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/733b7c3c-229f-4029-b748-dd855b17e079-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"733b7c3c-229f-4029-b748-dd855b17e079\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 31 16:32:25 crc kubenswrapper[4769]: I0131 16:32:25.747363 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/733b7c3c-229f-4029-b748-dd855b17e079-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"733b7c3c-229f-4029-b748-dd855b17e079\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 31 16:32:25 crc kubenswrapper[4769]: I0131 16:32:25.893936 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 31 16:32:26 crc kubenswrapper[4769]: I0131 16:32:26.287109 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 31 16:32:26 crc kubenswrapper[4769]: W0131 16:32:26.296393 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod733b7c3c_229f_4029_b748_dd855b17e079.slice/crio-26c11d3b34acd82dbf0dbadc405aedbadbe85b05ad0a5c58dccb54f59a12c4ba WatchSource:0}: Error finding container 26c11d3b34acd82dbf0dbadc405aedbadbe85b05ad0a5c58dccb54f59a12c4ba: Status 404 returned error can't find the container with id 26c11d3b34acd82dbf0dbadc405aedbadbe85b05ad0a5c58dccb54f59a12c4ba Jan 31 16:32:26 crc kubenswrapper[4769]: I0131 16:32:26.695115 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"733b7c3c-229f-4029-b748-dd855b17e079","Type":"ContainerStarted","Data":"9ca94364209748fc06fdc2b96f444872cdaf1b1a7d262f52aed1e1f0c8cf1e2c"} Jan 31 16:32:26 crc kubenswrapper[4769]: I0131 16:32:26.695401 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"733b7c3c-229f-4029-b748-dd855b17e079","Type":"ContainerStarted","Data":"26c11d3b34acd82dbf0dbadc405aedbadbe85b05ad0a5c58dccb54f59a12c4ba"} Jan 31 16:32:26 crc kubenswrapper[4769]: I0131 16:32:26.711185 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-9-crc" podStartSLOduration=1.711170115 podStartE2EDuration="1.711170115s" podCreationTimestamp="2026-01-31 16:32:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:32:26.708142881 +0000 UTC m=+194.782311550" watchObservedRunningTime="2026-01-31 16:32:26.711170115 +0000 UTC m=+194.785338784" Jan 31 16:32:27 crc kubenswrapper[4769]: I0131 16:32:27.702845 4769 generic.go:334] "Generic (PLEG): container finished" podID="733b7c3c-229f-4029-b748-dd855b17e079" containerID="9ca94364209748fc06fdc2b96f444872cdaf1b1a7d262f52aed1e1f0c8cf1e2c" exitCode=0 Jan 31 16:32:27 crc kubenswrapper[4769]: I0131 16:32:27.703140 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" 
event={"ID":"733b7c3c-229f-4029-b748-dd855b17e079","Type":"ContainerDied","Data":"9ca94364209748fc06fdc2b96f444872cdaf1b1a7d262f52aed1e1f0c8cf1e2c"} Jan 31 16:32:28 crc kubenswrapper[4769]: I0131 16:32:28.933434 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 31 16:32:29 crc kubenswrapper[4769]: I0131 16:32:29.051801 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/733b7c3c-229f-4029-b748-dd855b17e079-kubelet-dir\") pod \"733b7c3c-229f-4029-b748-dd855b17e079\" (UID: \"733b7c3c-229f-4029-b748-dd855b17e079\") " Jan 31 16:32:29 crc kubenswrapper[4769]: I0131 16:32:29.051959 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/733b7c3c-229f-4029-b748-dd855b17e079-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "733b7c3c-229f-4029-b748-dd855b17e079" (UID: "733b7c3c-229f-4029-b748-dd855b17e079"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 31 16:32:29 crc kubenswrapper[4769]: I0131 16:32:29.051995 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/733b7c3c-229f-4029-b748-dd855b17e079-kube-api-access\") pod \"733b7c3c-229f-4029-b748-dd855b17e079\" (UID: \"733b7c3c-229f-4029-b748-dd855b17e079\") " Jan 31 16:32:29 crc kubenswrapper[4769]: I0131 16:32:29.052236 4769 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/733b7c3c-229f-4029-b748-dd855b17e079-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 31 16:32:29 crc kubenswrapper[4769]: I0131 16:32:29.057384 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/733b7c3c-229f-4029-b748-dd855b17e079-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "733b7c3c-229f-4029-b748-dd855b17e079" (UID: "733b7c3c-229f-4029-b748-dd855b17e079"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:32:29 crc kubenswrapper[4769]: I0131 16:32:29.154133 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/733b7c3c-229f-4029-b748-dd855b17e079-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 31 16:32:29 crc kubenswrapper[4769]: I0131 16:32:29.725148 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"733b7c3c-229f-4029-b748-dd855b17e079","Type":"ContainerDied","Data":"26c11d3b34acd82dbf0dbadc405aedbadbe85b05ad0a5c58dccb54f59a12c4ba"} Jan 31 16:32:29 crc kubenswrapper[4769]: I0131 16:32:29.725790 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 31 16:32:29 crc kubenswrapper[4769]: I0131 16:32:29.726183 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="26c11d3b34acd82dbf0dbadc405aedbadbe85b05ad0a5c58dccb54f59a12c4ba" Jan 31 16:32:31 crc kubenswrapper[4769]: I0131 16:32:31.361963 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-sn7bc" Jan 31 16:32:31 crc kubenswrapper[4769]: I0131 16:32:31.403606 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-sn7bc" Jan 31 16:32:31 crc kubenswrapper[4769]: I0131 16:32:31.483859 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-dkmqk" Jan 31 16:32:32 crc kubenswrapper[4769]: I0131 16:32:32.741254 4769 generic.go:334] "Generic (PLEG): container finished" podID="63e5ae27-836f-438f-905b-6bb3ffa507ef" containerID="271d7fe309ae73e30505a9c6adcaf92863d74e2782a86bdf1ecb68d6db9f97ef" exitCode=0 Jan 31 16:32:32 crc kubenswrapper[4769]: I0131 16:32:32.741330 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zllsb" event={"ID":"63e5ae27-836f-438f-905b-6bb3ffa507ef","Type":"ContainerDied","Data":"271d7fe309ae73e30505a9c6adcaf92863d74e2782a86bdf1ecb68d6db9f97ef"} Jan 31 16:32:32 crc kubenswrapper[4769]: I0131 16:32:32.744793 4769 generic.go:334] "Generic (PLEG): container finished" podID="58ef25f8-8447-418b-a590-c964242d9336" containerID="907bc524fa869f0b09d048bea65a6307a83f12f910411d06be63544e043d9f32" exitCode=0 Jan 31 16:32:32 crc kubenswrapper[4769]: I0131 16:32:32.744880 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hvfw2" event={"ID":"58ef25f8-8447-418b-a590-c964242d9336","Type":"ContainerDied","Data":"907bc524fa869f0b09d048bea65a6307a83f12f910411d06be63544e043d9f32"} Jan 31 16:32:32 crc kubenswrapper[4769]: I0131 16:32:32.748574 4769 generic.go:334] "Generic (PLEG): container finished" podID="0abe8daf-8786-4b00-b488-d96f19efe5ba" containerID="e1843a03f53b32a6fcaddac5eb9895998b53961622b83f3ecf2bdd9d746778b7" exitCode=0 Jan 31 16:32:32 crc kubenswrapper[4769]: I0131 16:32:32.748650 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6n8rw" event={"ID":"0abe8daf-8786-4b00-b488-d96f19efe5ba","Type":"ContainerDied","Data":"e1843a03f53b32a6fcaddac5eb9895998b53961622b83f3ecf2bdd9d746778b7"} Jan 31 16:32:32 crc kubenswrapper[4769]: I0131 16:32:32.751326 4769 generic.go:334] "Generic (PLEG): container finished" podID="867ca138-e999-4977-a9f7-f9da159c3515" containerID="611ce905c7c655e593b197b376f4f20897058135401ace52cfa11ec238301152" exitCode=0 Jan 31 16:32:32 crc kubenswrapper[4769]: I0131 16:32:32.751360 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gzw85" event={"ID":"867ca138-e999-4977-a9f7-f9da159c3515","Type":"ContainerDied","Data":"611ce905c7c655e593b197b376f4f20897058135401ace52cfa11ec238301152"} Jan 31 16:32:32 crc kubenswrapper[4769]: I0131 16:32:32.789343 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 31 16:32:32 crc kubenswrapper[4769]: E0131 16:32:32.789912 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="733b7c3c-229f-4029-b748-dd855b17e079" containerName="pruner" Jan 31 16:32:32 crc 
kubenswrapper[4769]: I0131 16:32:32.789998 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="733b7c3c-229f-4029-b748-dd855b17e079" containerName="pruner" Jan 31 16:32:32 crc kubenswrapper[4769]: I0131 16:32:32.790204 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="733b7c3c-229f-4029-b748-dd855b17e079" containerName="pruner" Jan 31 16:32:32 crc kubenswrapper[4769]: I0131 16:32:32.790768 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 31 16:32:32 crc kubenswrapper[4769]: I0131 16:32:32.797157 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Jan 31 16:32:32 crc kubenswrapper[4769]: I0131 16:32:32.797333 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Jan 31 16:32:32 crc kubenswrapper[4769]: I0131 16:32:32.798788 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 31 16:32:32 crc kubenswrapper[4769]: I0131 16:32:32.898937 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/dcb1a34a-c9b8-4dda-883f-f5b772ed1159-kubelet-dir\") pod \"installer-9-crc\" (UID: \"dcb1a34a-c9b8-4dda-883f-f5b772ed1159\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 31 16:32:32 crc kubenswrapper[4769]: I0131 16:32:32.899010 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/dcb1a34a-c9b8-4dda-883f-f5b772ed1159-kube-api-access\") pod \"installer-9-crc\" (UID: \"dcb1a34a-c9b8-4dda-883f-f5b772ed1159\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 31 16:32:32 crc kubenswrapper[4769]: I0131 16:32:32.899025 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/dcb1a34a-c9b8-4dda-883f-f5b772ed1159-var-lock\") pod \"installer-9-crc\" (UID: \"dcb1a34a-c9b8-4dda-883f-f5b772ed1159\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 31 16:32:32 crc kubenswrapper[4769]: I0131 16:32:32.999876 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/dcb1a34a-c9b8-4dda-883f-f5b772ed1159-kubelet-dir\") pod \"installer-9-crc\" (UID: \"dcb1a34a-c9b8-4dda-883f-f5b772ed1159\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 31 16:32:32 crc kubenswrapper[4769]: I0131 16:32:32.999940 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/dcb1a34a-c9b8-4dda-883f-f5b772ed1159-kube-api-access\") pod \"installer-9-crc\" (UID: \"dcb1a34a-c9b8-4dda-883f-f5b772ed1159\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 31 16:32:32 crc kubenswrapper[4769]: I0131 16:32:32.999958 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/dcb1a34a-c9b8-4dda-883f-f5b772ed1159-var-lock\") pod \"installer-9-crc\" (UID: \"dcb1a34a-c9b8-4dda-883f-f5b772ed1159\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 31 16:32:33 crc kubenswrapper[4769]: I0131 16:32:33.000011 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: 
\"kubernetes.io/host-path/dcb1a34a-c9b8-4dda-883f-f5b772ed1159-kubelet-dir\") pod \"installer-9-crc\" (UID: \"dcb1a34a-c9b8-4dda-883f-f5b772ed1159\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 31 16:32:33 crc kubenswrapper[4769]: I0131 16:32:33.000055 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/dcb1a34a-c9b8-4dda-883f-f5b772ed1159-var-lock\") pod \"installer-9-crc\" (UID: \"dcb1a34a-c9b8-4dda-883f-f5b772ed1159\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 31 16:32:33 crc kubenswrapper[4769]: I0131 16:32:33.025968 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/dcb1a34a-c9b8-4dda-883f-f5b772ed1159-kube-api-access\") pod \"installer-9-crc\" (UID: \"dcb1a34a-c9b8-4dda-883f-f5b772ed1159\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 31 16:32:33 crc kubenswrapper[4769]: I0131 16:32:33.143744 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 31 16:32:33 crc kubenswrapper[4769]: I0131 16:32:33.356521 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 31 16:32:33 crc kubenswrapper[4769]: W0131 16:32:33.361868 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-poddcb1a34a_c9b8_4dda_883f_f5b772ed1159.slice/crio-df387f9f606a8acfccd1ed16dfa2b0c6b5ca1e607536c254446576c3b549d5b4 WatchSource:0}: Error finding container df387f9f606a8acfccd1ed16dfa2b0c6b5ca1e607536c254446576c3b549d5b4: Status 404 returned error can't find the container with id df387f9f606a8acfccd1ed16dfa2b0c6b5ca1e607536c254446576c3b549d5b4 Jan 31 16:32:33 crc kubenswrapper[4769]: I0131 16:32:33.736202 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dkmqk"] Jan 31 16:32:33 crc kubenswrapper[4769]: I0131 16:32:33.736709 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-dkmqk" podUID="3d149dc3-950d-4f91-b78f-d4469a197742" containerName="registry-server" containerID="cri-o://838a0ac9342df925601fa4c103b020a8bf3f6f69234cf66217f2b46af788b5f7" gracePeriod=2 Jan 31 16:32:33 crc kubenswrapper[4769]: I0131 16:32:33.756160 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"dcb1a34a-c9b8-4dda-883f-f5b772ed1159","Type":"ContainerStarted","Data":"df387f9f606a8acfccd1ed16dfa2b0c6b5ca1e607536c254446576c3b549d5b4"} Jan 31 16:32:34 crc kubenswrapper[4769]: I0131 16:32:34.294853 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-jtrpz" Jan 31 16:32:34 crc kubenswrapper[4769]: I0131 16:32:34.337445 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-jtrpz" Jan 31 16:32:34 crc kubenswrapper[4769]: I0131 16:32:34.762957 4769 generic.go:334] "Generic (PLEG): container finished" podID="3d149dc3-950d-4f91-b78f-d4469a197742" containerID="838a0ac9342df925601fa4c103b020a8bf3f6f69234cf66217f2b46af788b5f7" exitCode=0 Jan 31 16:32:34 crc kubenswrapper[4769]: I0131 16:32:34.763027 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dkmqk" 
event={"ID":"3d149dc3-950d-4f91-b78f-d4469a197742","Type":"ContainerDied","Data":"838a0ac9342df925601fa4c103b020a8bf3f6f69234cf66217f2b46af788b5f7"} Jan 31 16:32:34 crc kubenswrapper[4769]: I0131 16:32:34.763929 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"dcb1a34a-c9b8-4dda-883f-f5b772ed1159","Type":"ContainerStarted","Data":"8efadf446faee693c2c7294623d8c902c4e57c29981148dce0c24187850e5b17"} Jan 31 16:32:34 crc kubenswrapper[4769]: I0131 16:32:34.782423 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=2.782407182 podStartE2EDuration="2.782407182s" podCreationTimestamp="2026-01-31 16:32:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:32:34.780883122 +0000 UTC m=+202.855051791" watchObservedRunningTime="2026-01-31 16:32:34.782407182 +0000 UTC m=+202.856575851" Jan 31 16:32:34 crc kubenswrapper[4769]: I0131 16:32:34.958136 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dkmqk" Jan 31 16:32:35 crc kubenswrapper[4769]: I0131 16:32:35.132212 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3d149dc3-950d-4f91-b78f-d4469a197742-catalog-content\") pod \"3d149dc3-950d-4f91-b78f-d4469a197742\" (UID: \"3d149dc3-950d-4f91-b78f-d4469a197742\") " Jan 31 16:32:35 crc kubenswrapper[4769]: I0131 16:32:35.132342 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nztbf\" (UniqueName: \"kubernetes.io/projected/3d149dc3-950d-4f91-b78f-d4469a197742-kube-api-access-nztbf\") pod \"3d149dc3-950d-4f91-b78f-d4469a197742\" (UID: \"3d149dc3-950d-4f91-b78f-d4469a197742\") " Jan 31 16:32:35 crc kubenswrapper[4769]: I0131 16:32:35.132391 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3d149dc3-950d-4f91-b78f-d4469a197742-utilities\") pod \"3d149dc3-950d-4f91-b78f-d4469a197742\" (UID: \"3d149dc3-950d-4f91-b78f-d4469a197742\") " Jan 31 16:32:35 crc kubenswrapper[4769]: I0131 16:32:35.133201 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3d149dc3-950d-4f91-b78f-d4469a197742-utilities" (OuterVolumeSpecName: "utilities") pod "3d149dc3-950d-4f91-b78f-d4469a197742" (UID: "3d149dc3-950d-4f91-b78f-d4469a197742"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:32:35 crc kubenswrapper[4769]: I0131 16:32:35.139871 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d149dc3-950d-4f91-b78f-d4469a197742-kube-api-access-nztbf" (OuterVolumeSpecName: "kube-api-access-nztbf") pod "3d149dc3-950d-4f91-b78f-d4469a197742" (UID: "3d149dc3-950d-4f91-b78f-d4469a197742"). InnerVolumeSpecName "kube-api-access-nztbf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:32:35 crc kubenswrapper[4769]: I0131 16:32:35.184447 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3d149dc3-950d-4f91-b78f-d4469a197742-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3d149dc3-950d-4f91-b78f-d4469a197742" (UID: "3d149dc3-950d-4f91-b78f-d4469a197742"). 
InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:32:35 crc kubenswrapper[4769]: I0131 16:32:35.234222 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nztbf\" (UniqueName: \"kubernetes.io/projected/3d149dc3-950d-4f91-b78f-d4469a197742-kube-api-access-nztbf\") on node \"crc\" DevicePath \"\"" Jan 31 16:32:35 crc kubenswrapper[4769]: I0131 16:32:35.234260 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3d149dc3-950d-4f91-b78f-d4469a197742-utilities\") on node \"crc\" DevicePath \"\"" Jan 31 16:32:35 crc kubenswrapper[4769]: I0131 16:32:35.234275 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3d149dc3-950d-4f91-b78f-d4469a197742-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 31 16:32:35 crc kubenswrapper[4769]: I0131 16:32:35.772381 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dkmqk" event={"ID":"3d149dc3-950d-4f91-b78f-d4469a197742","Type":"ContainerDied","Data":"25170940a796171a5fcab55c679cef637333e8aa4a674075871b3577abefe1e1"} Jan 31 16:32:35 crc kubenswrapper[4769]: I0131 16:32:35.772686 4769 scope.go:117] "RemoveContainer" containerID="838a0ac9342df925601fa4c103b020a8bf3f6f69234cf66217f2b46af788b5f7" Jan 31 16:32:35 crc kubenswrapper[4769]: I0131 16:32:35.772409 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dkmqk" Jan 31 16:32:35 crc kubenswrapper[4769]: I0131 16:32:35.800616 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dkmqk"] Jan 31 16:32:35 crc kubenswrapper[4769]: I0131 16:32:35.803887 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-dkmqk"] Jan 31 16:32:36 crc kubenswrapper[4769]: I0131 16:32:36.004983 4769 scope.go:117] "RemoveContainer" containerID="c44bfbface029c596b89996142360042a9885b1d84c37eb19b642d87f3824436" Jan 31 16:32:36 crc kubenswrapper[4769]: I0131 16:32:36.277810 4769 scope.go:117] "RemoveContainer" containerID="34783706c879babaccf58564c0546194acaed7cb646f1d99d1539ea1e9474ac3" Jan 31 16:32:36 crc kubenswrapper[4769]: I0131 16:32:36.714051 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3d149dc3-950d-4f91-b78f-d4469a197742" path="/var/lib/kubelet/pods/3d149dc3-950d-4f91-b78f-d4469a197742/volumes" Jan 31 16:32:36 crc kubenswrapper[4769]: I0131 16:32:36.778732 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hvfw2" event={"ID":"58ef25f8-8447-418b-a590-c964242d9336","Type":"ContainerStarted","Data":"c229c183188a9693441f9945a889ef7029a878f9a6edddb3877d89a13f076356"} Jan 31 16:32:36 crc kubenswrapper[4769]: I0131 16:32:36.797607 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-hvfw2" podStartSLOduration=3.907365247 podStartE2EDuration="56.797590219s" podCreationTimestamp="2026-01-31 16:31:40 +0000 UTC" firstStartedPulling="2026-01-31 16:31:42.270628729 +0000 UTC m=+150.344797398" lastFinishedPulling="2026-01-31 16:32:35.160853701 +0000 UTC m=+203.235022370" observedRunningTime="2026-01-31 16:32:36.795941736 +0000 UTC m=+204.870110415" watchObservedRunningTime="2026-01-31 16:32:36.797590219 +0000 UTC m=+204.871758898" Jan 31 16:32:39 crc kubenswrapper[4769]: 
I0131 16:32:39.802594 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gzw85" event={"ID":"867ca138-e999-4977-a9f7-f9da159c3515","Type":"ContainerStarted","Data":"50ae420b825ce0bc823738638c3343e2f984bc07bca115e37d896807d8294117"} Jan 31 16:32:39 crc kubenswrapper[4769]: I0131 16:32:39.804216 4769 generic.go:334] "Generic (PLEG): container finished" podID="52d66af3-ac8c-4ea4-9994-eeeaa65513cc" containerID="afe218e1fa51b9aacada6d4be3c4ed1b041ea5651d4c4fccfbf8a2a532411208" exitCode=0 Jan 31 16:32:39 crc kubenswrapper[4769]: I0131 16:32:39.804274 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n6pd8" event={"ID":"52d66af3-ac8c-4ea4-9994-eeeaa65513cc","Type":"ContainerDied","Data":"afe218e1fa51b9aacada6d4be3c4ed1b041ea5651d4c4fccfbf8a2a532411208"} Jan 31 16:32:39 crc kubenswrapper[4769]: I0131 16:32:39.806168 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zllsb" event={"ID":"63e5ae27-836f-438f-905b-6bb3ffa507ef","Type":"ContainerStarted","Data":"b080b48d4cea3de1077389fff8580c5956559f80bf504972f898db6df0bc5b4f"} Jan 31 16:32:39 crc kubenswrapper[4769]: I0131 16:32:39.808542 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6n8rw" event={"ID":"0abe8daf-8786-4b00-b488-d96f19efe5ba","Type":"ContainerStarted","Data":"b8df9d9bb70cb8cde3aa1f32f2ec08e72384723df3d2609177a6a93e1e1cc71e"} Jan 31 16:32:39 crc kubenswrapper[4769]: I0131 16:32:39.837292 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-gzw85" podStartSLOduration=2.403410331 podStartE2EDuration="55.837275627s" podCreationTimestamp="2026-01-31 16:31:44 +0000 UTC" firstStartedPulling="2026-01-31 16:31:45.365186045 +0000 UTC m=+153.439354714" lastFinishedPulling="2026-01-31 16:32:38.799051331 +0000 UTC m=+206.873220010" observedRunningTime="2026-01-31 16:32:39.836481966 +0000 UTC m=+207.910650635" watchObservedRunningTime="2026-01-31 16:32:39.837275627 +0000 UTC m=+207.911444296" Jan 31 16:32:39 crc kubenswrapper[4769]: I0131 16:32:39.903801 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-zllsb" podStartSLOduration=3.261070942 podStartE2EDuration="57.903786453s" podCreationTimestamp="2026-01-31 16:31:42 +0000 UTC" firstStartedPulling="2026-01-31 16:31:44.346621848 +0000 UTC m=+152.420790517" lastFinishedPulling="2026-01-31 16:32:38.989337349 +0000 UTC m=+207.063506028" observedRunningTime="2026-01-31 16:32:39.901571593 +0000 UTC m=+207.975740262" watchObservedRunningTime="2026-01-31 16:32:39.903786453 +0000 UTC m=+207.977955122" Jan 31 16:32:39 crc kubenswrapper[4769]: I0131 16:32:39.905055 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-6n8rw" podStartSLOduration=2.459124931 podStartE2EDuration="56.905049746s" podCreationTimestamp="2026-01-31 16:31:43 +0000 UTC" firstStartedPulling="2026-01-31 16:31:44.340561331 +0000 UTC m=+152.414730000" lastFinishedPulling="2026-01-31 16:32:38.786486146 +0000 UTC m=+206.860654815" observedRunningTime="2026-01-31 16:32:39.884039706 +0000 UTC m=+207.958208375" watchObservedRunningTime="2026-01-31 16:32:39.905049746 +0000 UTC m=+207.979218415" Jan 31 16:32:40 crc kubenswrapper[4769]: I0131 16:32:40.815299 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-n6pd8" event={"ID":"52d66af3-ac8c-4ea4-9994-eeeaa65513cc","Type":"ContainerStarted","Data":"24c9d312679f433805cf74adb660ac9b5d6e747e3f6af96e54354abc77f02ab6"} Jan 31 16:32:40 crc kubenswrapper[4769]: I0131 16:32:40.838614 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-n6pd8" podStartSLOduration=1.927446888 podStartE2EDuration="59.838590588s" podCreationTimestamp="2026-01-31 16:31:41 +0000 UTC" firstStartedPulling="2026-01-31 16:31:42.280949666 +0000 UTC m=+150.355118335" lastFinishedPulling="2026-01-31 16:32:40.192093326 +0000 UTC m=+208.266262035" observedRunningTime="2026-01-31 16:32:40.834561601 +0000 UTC m=+208.908730270" watchObservedRunningTime="2026-01-31 16:32:40.838590588 +0000 UTC m=+208.912759277" Jan 31 16:32:41 crc kubenswrapper[4769]: I0131 16:32:41.348278 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-hvfw2" Jan 31 16:32:41 crc kubenswrapper[4769]: I0131 16:32:41.348329 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-hvfw2" Jan 31 16:32:41 crc kubenswrapper[4769]: I0131 16:32:41.395227 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-hvfw2" Jan 31 16:32:41 crc kubenswrapper[4769]: I0131 16:32:41.628639 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-n6pd8" Jan 31 16:32:41 crc kubenswrapper[4769]: I0131 16:32:41.628711 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-n6pd8" Jan 31 16:32:41 crc kubenswrapper[4769]: I0131 16:32:41.881283 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-hvfw2" Jan 31 16:32:42 crc kubenswrapper[4769]: I0131 16:32:42.677634 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-n6pd8" podUID="52d66af3-ac8c-4ea4-9994-eeeaa65513cc" containerName="registry-server" probeResult="failure" output=< Jan 31 16:32:42 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Jan 31 16:32:42 crc kubenswrapper[4769]: > Jan 31 16:32:43 crc kubenswrapper[4769]: I0131 16:32:43.036604 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-zllsb" Jan 31 16:32:43 crc kubenswrapper[4769]: I0131 16:32:43.036711 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-zllsb" Jan 31 16:32:43 crc kubenswrapper[4769]: I0131 16:32:43.088149 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-zllsb" Jan 31 16:32:43 crc kubenswrapper[4769]: I0131 16:32:43.450745 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-6n8rw" Jan 31 16:32:43 crc kubenswrapper[4769]: I0131 16:32:43.451280 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-6n8rw" Jan 31 16:32:43 crc kubenswrapper[4769]: I0131 16:32:43.503202 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-6n8rw" Jan 31 16:32:43 crc 
kubenswrapper[4769]: I0131 16:32:43.895460 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-6n8rw" Jan 31 16:32:44 crc kubenswrapper[4769]: I0131 16:32:44.641078 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-gzw85" Jan 31 16:32:44 crc kubenswrapper[4769]: I0131 16:32:44.641148 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-gzw85" Jan 31 16:32:45 crc kubenswrapper[4769]: I0131 16:32:45.690596 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gzw85" podUID="867ca138-e999-4977-a9f7-f9da159c3515" containerName="registry-server" probeResult="failure" output=< Jan 31 16:32:45 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Jan 31 16:32:45 crc kubenswrapper[4769]: > Jan 31 16:32:46 crc kubenswrapper[4769]: I0131 16:32:46.343141 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6n8rw"] Jan 31 16:32:46 crc kubenswrapper[4769]: I0131 16:32:46.866657 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-6n8rw" podUID="0abe8daf-8786-4b00-b488-d96f19efe5ba" containerName="registry-server" containerID="cri-o://b8df9d9bb70cb8cde3aa1f32f2ec08e72384723df3d2609177a6a93e1e1cc71e" gracePeriod=2 Jan 31 16:32:47 crc kubenswrapper[4769]: I0131 16:32:47.326633 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6n8rw" Jan 31 16:32:47 crc kubenswrapper[4769]: I0131 16:32:47.501834 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0abe8daf-8786-4b00-b488-d96f19efe5ba-utilities\") pod \"0abe8daf-8786-4b00-b488-d96f19efe5ba\" (UID: \"0abe8daf-8786-4b00-b488-d96f19efe5ba\") " Jan 31 16:32:47 crc kubenswrapper[4769]: I0131 16:32:47.501886 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kcxv2\" (UniqueName: \"kubernetes.io/projected/0abe8daf-8786-4b00-b488-d96f19efe5ba-kube-api-access-kcxv2\") pod \"0abe8daf-8786-4b00-b488-d96f19efe5ba\" (UID: \"0abe8daf-8786-4b00-b488-d96f19efe5ba\") " Jan 31 16:32:47 crc kubenswrapper[4769]: I0131 16:32:47.502690 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0abe8daf-8786-4b00-b488-d96f19efe5ba-utilities" (OuterVolumeSpecName: "utilities") pod "0abe8daf-8786-4b00-b488-d96f19efe5ba" (UID: "0abe8daf-8786-4b00-b488-d96f19efe5ba"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:32:47 crc kubenswrapper[4769]: I0131 16:32:47.502742 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0abe8daf-8786-4b00-b488-d96f19efe5ba-catalog-content\") pod \"0abe8daf-8786-4b00-b488-d96f19efe5ba\" (UID: \"0abe8daf-8786-4b00-b488-d96f19efe5ba\") " Jan 31 16:32:47 crc kubenswrapper[4769]: I0131 16:32:47.503075 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0abe8daf-8786-4b00-b488-d96f19efe5ba-utilities\") on node \"crc\" DevicePath \"\"" Jan 31 16:32:47 crc kubenswrapper[4769]: I0131 16:32:47.510340 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0abe8daf-8786-4b00-b488-d96f19efe5ba-kube-api-access-kcxv2" (OuterVolumeSpecName: "kube-api-access-kcxv2") pod "0abe8daf-8786-4b00-b488-d96f19efe5ba" (UID: "0abe8daf-8786-4b00-b488-d96f19efe5ba"). InnerVolumeSpecName "kube-api-access-kcxv2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:32:47 crc kubenswrapper[4769]: I0131 16:32:47.523939 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0abe8daf-8786-4b00-b488-d96f19efe5ba-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0abe8daf-8786-4b00-b488-d96f19efe5ba" (UID: "0abe8daf-8786-4b00-b488-d96f19efe5ba"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:32:47 crc kubenswrapper[4769]: I0131 16:32:47.604924 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kcxv2\" (UniqueName: \"kubernetes.io/projected/0abe8daf-8786-4b00-b488-d96f19efe5ba-kube-api-access-kcxv2\") on node \"crc\" DevicePath \"\"" Jan 31 16:32:47 crc kubenswrapper[4769]: I0131 16:32:47.604965 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0abe8daf-8786-4b00-b488-d96f19efe5ba-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 31 16:32:47 crc kubenswrapper[4769]: I0131 16:32:47.876781 4769 generic.go:334] "Generic (PLEG): container finished" podID="0abe8daf-8786-4b00-b488-d96f19efe5ba" containerID="b8df9d9bb70cb8cde3aa1f32f2ec08e72384723df3d2609177a6a93e1e1cc71e" exitCode=0 Jan 31 16:32:47 crc kubenswrapper[4769]: I0131 16:32:47.876850 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6n8rw" event={"ID":"0abe8daf-8786-4b00-b488-d96f19efe5ba","Type":"ContainerDied","Data":"b8df9d9bb70cb8cde3aa1f32f2ec08e72384723df3d2609177a6a93e1e1cc71e"} Jan 31 16:32:47 crc kubenswrapper[4769]: I0131 16:32:47.876919 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6n8rw" event={"ID":"0abe8daf-8786-4b00-b488-d96f19efe5ba","Type":"ContainerDied","Data":"8be2badc84685c8747c472c72d3ac16b26093a2a3e135c8fee77a4d98490057b"} Jan 31 16:32:47 crc kubenswrapper[4769]: I0131 16:32:47.876966 4769 scope.go:117] "RemoveContainer" containerID="b8df9d9bb70cb8cde3aa1f32f2ec08e72384723df3d2609177a6a93e1e1cc71e" Jan 31 16:32:47 crc kubenswrapper[4769]: I0131 16:32:47.876866 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6n8rw" Jan 31 16:32:47 crc kubenswrapper[4769]: I0131 16:32:47.902862 4769 scope.go:117] "RemoveContainer" containerID="e1843a03f53b32a6fcaddac5eb9895998b53961622b83f3ecf2bdd9d746778b7" Jan 31 16:32:47 crc kubenswrapper[4769]: I0131 16:32:47.922459 4769 scope.go:117] "RemoveContainer" containerID="a07090e65ebb40e4e1bcf9d74d7480072edafd299f75528ff24fb9777b1c1e12" Jan 31 16:32:47 crc kubenswrapper[4769]: I0131 16:32:47.927167 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6n8rw"] Jan 31 16:32:47 crc kubenswrapper[4769]: I0131 16:32:47.950740 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-6n8rw"] Jan 31 16:32:47 crc kubenswrapper[4769]: I0131 16:32:47.963898 4769 scope.go:117] "RemoveContainer" containerID="b8df9d9bb70cb8cde3aa1f32f2ec08e72384723df3d2609177a6a93e1e1cc71e" Jan 31 16:32:47 crc kubenswrapper[4769]: E0131 16:32:47.964415 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b8df9d9bb70cb8cde3aa1f32f2ec08e72384723df3d2609177a6a93e1e1cc71e\": container with ID starting with b8df9d9bb70cb8cde3aa1f32f2ec08e72384723df3d2609177a6a93e1e1cc71e not found: ID does not exist" containerID="b8df9d9bb70cb8cde3aa1f32f2ec08e72384723df3d2609177a6a93e1e1cc71e" Jan 31 16:32:47 crc kubenswrapper[4769]: I0131 16:32:47.964481 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b8df9d9bb70cb8cde3aa1f32f2ec08e72384723df3d2609177a6a93e1e1cc71e"} err="failed to get container status \"b8df9d9bb70cb8cde3aa1f32f2ec08e72384723df3d2609177a6a93e1e1cc71e\": rpc error: code = NotFound desc = could not find container \"b8df9d9bb70cb8cde3aa1f32f2ec08e72384723df3d2609177a6a93e1e1cc71e\": container with ID starting with b8df9d9bb70cb8cde3aa1f32f2ec08e72384723df3d2609177a6a93e1e1cc71e not found: ID does not exist" Jan 31 16:32:47 crc kubenswrapper[4769]: I0131 16:32:47.964558 4769 scope.go:117] "RemoveContainer" containerID="e1843a03f53b32a6fcaddac5eb9895998b53961622b83f3ecf2bdd9d746778b7" Jan 31 16:32:47 crc kubenswrapper[4769]: E0131 16:32:47.965030 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e1843a03f53b32a6fcaddac5eb9895998b53961622b83f3ecf2bdd9d746778b7\": container with ID starting with e1843a03f53b32a6fcaddac5eb9895998b53961622b83f3ecf2bdd9d746778b7 not found: ID does not exist" containerID="e1843a03f53b32a6fcaddac5eb9895998b53961622b83f3ecf2bdd9d746778b7" Jan 31 16:32:47 crc kubenswrapper[4769]: I0131 16:32:47.965069 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e1843a03f53b32a6fcaddac5eb9895998b53961622b83f3ecf2bdd9d746778b7"} err="failed to get container status \"e1843a03f53b32a6fcaddac5eb9895998b53961622b83f3ecf2bdd9d746778b7\": rpc error: code = NotFound desc = could not find container \"e1843a03f53b32a6fcaddac5eb9895998b53961622b83f3ecf2bdd9d746778b7\": container with ID starting with e1843a03f53b32a6fcaddac5eb9895998b53961622b83f3ecf2bdd9d746778b7 not found: ID does not exist" Jan 31 16:32:47 crc kubenswrapper[4769]: I0131 16:32:47.965089 4769 scope.go:117] "RemoveContainer" containerID="a07090e65ebb40e4e1bcf9d74d7480072edafd299f75528ff24fb9777b1c1e12" Jan 31 16:32:47 crc kubenswrapper[4769]: E0131 16:32:47.965402 4769 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"a07090e65ebb40e4e1bcf9d74d7480072edafd299f75528ff24fb9777b1c1e12\": container with ID starting with a07090e65ebb40e4e1bcf9d74d7480072edafd299f75528ff24fb9777b1c1e12 not found: ID does not exist" containerID="a07090e65ebb40e4e1bcf9d74d7480072edafd299f75528ff24fb9777b1c1e12" Jan 31 16:32:47 crc kubenswrapper[4769]: I0131 16:32:47.965435 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a07090e65ebb40e4e1bcf9d74d7480072edafd299f75528ff24fb9777b1c1e12"} err="failed to get container status \"a07090e65ebb40e4e1bcf9d74d7480072edafd299f75528ff24fb9777b1c1e12\": rpc error: code = NotFound desc = could not find container \"a07090e65ebb40e4e1bcf9d74d7480072edafd299f75528ff24fb9777b1c1e12\": container with ID starting with a07090e65ebb40e4e1bcf9d74d7480072edafd299f75528ff24fb9777b1c1e12 not found: ID does not exist" Jan 31 16:32:48 crc kubenswrapper[4769]: I0131 16:32:48.715195 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0abe8daf-8786-4b00-b488-d96f19efe5ba" path="/var/lib/kubelet/pods/0abe8daf-8786-4b00-b488-d96f19efe5ba/volumes" Jan 31 16:32:50 crc kubenswrapper[4769]: I0131 16:32:50.682593 4769 patch_prober.go:28] interesting pod/machine-config-daemon-4bqbm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 31 16:32:50 crc kubenswrapper[4769]: I0131 16:32:50.683062 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 31 16:32:50 crc kubenswrapper[4769]: I0131 16:32:50.683125 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" Jan 31 16:32:50 crc kubenswrapper[4769]: I0131 16:32:50.683954 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f48035545fd929a672be1a83a941b13f4b352bdb858af6a412efec46dc7ac217"} pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 31 16:32:50 crc kubenswrapper[4769]: I0131 16:32:50.684051 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" containerID="cri-o://f48035545fd929a672be1a83a941b13f4b352bdb858af6a412efec46dc7ac217" gracePeriod=600 Jan 31 16:32:50 crc kubenswrapper[4769]: I0131 16:32:50.900296 4769 generic.go:334] "Generic (PLEG): container finished" podID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerID="f48035545fd929a672be1a83a941b13f4b352bdb858af6a412efec46dc7ac217" exitCode=0 Jan 31 16:32:50 crc kubenswrapper[4769]: I0131 16:32:50.900389 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" 
event={"ID":"1d352f75-43f7-4b8c-867e-cfb17bbbe011","Type":"ContainerDied","Data":"f48035545fd929a672be1a83a941b13f4b352bdb858af6a412efec46dc7ac217"} Jan 31 16:32:51 crc kubenswrapper[4769]: I0131 16:32:51.705044 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-n6pd8" Jan 31 16:32:51 crc kubenswrapper[4769]: I0131 16:32:51.767784 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-n6pd8" Jan 31 16:32:51 crc kubenswrapper[4769]: I0131 16:32:51.909775 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" event={"ID":"1d352f75-43f7-4b8c-867e-cfb17bbbe011","Type":"ContainerStarted","Data":"a8bb3feeaa45e2f9b9bc6d64ceaa00a54b12973097c91f5714ebbca3e9dbe151"} Jan 31 16:32:53 crc kubenswrapper[4769]: I0131 16:32:53.108879 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-zllsb" Jan 31 16:32:53 crc kubenswrapper[4769]: I0131 16:32:53.538555 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-n6pd8"] Jan 31 16:32:53 crc kubenswrapper[4769]: I0131 16:32:53.538847 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-n6pd8" podUID="52d66af3-ac8c-4ea4-9994-eeeaa65513cc" containerName="registry-server" containerID="cri-o://24c9d312679f433805cf74adb660ac9b5d6e747e3f6af96e54354abc77f02ab6" gracePeriod=2 Jan 31 16:32:53 crc kubenswrapper[4769]: I0131 16:32:53.890365 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-h8nkx"] Jan 31 16:32:53 crc kubenswrapper[4769]: I0131 16:32:53.926755 4769 generic.go:334] "Generic (PLEG): container finished" podID="52d66af3-ac8c-4ea4-9994-eeeaa65513cc" containerID="24c9d312679f433805cf74adb660ac9b5d6e747e3f6af96e54354abc77f02ab6" exitCode=0 Jan 31 16:32:53 crc kubenswrapper[4769]: I0131 16:32:53.926794 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n6pd8" event={"ID":"52d66af3-ac8c-4ea4-9994-eeeaa65513cc","Type":"ContainerDied","Data":"24c9d312679f433805cf74adb660ac9b5d6e747e3f6af96e54354abc77f02ab6"} Jan 31 16:32:53 crc kubenswrapper[4769]: I0131 16:32:53.967545 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-n6pd8" Jan 31 16:32:54 crc kubenswrapper[4769]: I0131 16:32:54.093546 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52d66af3-ac8c-4ea4-9994-eeeaa65513cc-catalog-content\") pod \"52d66af3-ac8c-4ea4-9994-eeeaa65513cc\" (UID: \"52d66af3-ac8c-4ea4-9994-eeeaa65513cc\") " Jan 31 16:32:54 crc kubenswrapper[4769]: I0131 16:32:54.093618 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qmt9n\" (UniqueName: \"kubernetes.io/projected/52d66af3-ac8c-4ea4-9994-eeeaa65513cc-kube-api-access-qmt9n\") pod \"52d66af3-ac8c-4ea4-9994-eeeaa65513cc\" (UID: \"52d66af3-ac8c-4ea4-9994-eeeaa65513cc\") " Jan 31 16:32:54 crc kubenswrapper[4769]: I0131 16:32:54.093647 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52d66af3-ac8c-4ea4-9994-eeeaa65513cc-utilities\") pod \"52d66af3-ac8c-4ea4-9994-eeeaa65513cc\" (UID: \"52d66af3-ac8c-4ea4-9994-eeeaa65513cc\") " Jan 31 16:32:54 crc kubenswrapper[4769]: I0131 16:32:54.094645 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/52d66af3-ac8c-4ea4-9994-eeeaa65513cc-utilities" (OuterVolumeSpecName: "utilities") pod "52d66af3-ac8c-4ea4-9994-eeeaa65513cc" (UID: "52d66af3-ac8c-4ea4-9994-eeeaa65513cc"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:32:54 crc kubenswrapper[4769]: I0131 16:32:54.106609 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/52d66af3-ac8c-4ea4-9994-eeeaa65513cc-kube-api-access-qmt9n" (OuterVolumeSpecName: "kube-api-access-qmt9n") pod "52d66af3-ac8c-4ea4-9994-eeeaa65513cc" (UID: "52d66af3-ac8c-4ea4-9994-eeeaa65513cc"). InnerVolumeSpecName "kube-api-access-qmt9n". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:32:54 crc kubenswrapper[4769]: I0131 16:32:54.158430 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/52d66af3-ac8c-4ea4-9994-eeeaa65513cc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "52d66af3-ac8c-4ea4-9994-eeeaa65513cc" (UID: "52d66af3-ac8c-4ea4-9994-eeeaa65513cc"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:32:54 crc kubenswrapper[4769]: I0131 16:32:54.194578 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52d66af3-ac8c-4ea4-9994-eeeaa65513cc-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 31 16:32:54 crc kubenswrapper[4769]: I0131 16:32:54.194613 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qmt9n\" (UniqueName: \"kubernetes.io/projected/52d66af3-ac8c-4ea4-9994-eeeaa65513cc-kube-api-access-qmt9n\") on node \"crc\" DevicePath \"\"" Jan 31 16:32:54 crc kubenswrapper[4769]: I0131 16:32:54.194627 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52d66af3-ac8c-4ea4-9994-eeeaa65513cc-utilities\") on node \"crc\" DevicePath \"\"" Jan 31 16:32:54 crc kubenswrapper[4769]: I0131 16:32:54.692975 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-gzw85" Jan 31 16:32:54 crc kubenswrapper[4769]: I0131 16:32:54.759261 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-gzw85" Jan 31 16:32:54 crc kubenswrapper[4769]: I0131 16:32:54.935734 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n6pd8" event={"ID":"52d66af3-ac8c-4ea4-9994-eeeaa65513cc","Type":"ContainerDied","Data":"ae448425c3138cc4081a67f18146f1db8867909d1f3c27cb528559f14e1e3ebe"} Jan 31 16:32:54 crc kubenswrapper[4769]: I0131 16:32:54.935805 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-n6pd8" Jan 31 16:32:54 crc kubenswrapper[4769]: I0131 16:32:54.935817 4769 scope.go:117] "RemoveContainer" containerID="24c9d312679f433805cf74adb660ac9b5d6e747e3f6af96e54354abc77f02ab6" Jan 31 16:32:54 crc kubenswrapper[4769]: I0131 16:32:54.961392 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-n6pd8"] Jan 31 16:32:54 crc kubenswrapper[4769]: I0131 16:32:54.964528 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-n6pd8"] Jan 31 16:32:54 crc kubenswrapper[4769]: I0131 16:32:54.965183 4769 scope.go:117] "RemoveContainer" containerID="afe218e1fa51b9aacada6d4be3c4ed1b041ea5651d4c4fccfbf8a2a532411208" Jan 31 16:32:54 crc kubenswrapper[4769]: I0131 16:32:54.991143 4769 scope.go:117] "RemoveContainer" containerID="ea4dd0e115b8278d7efd880046fa9567e17d327c7676f0cdbe785496c802a0ff" Jan 31 16:32:56 crc kubenswrapper[4769]: I0131 16:32:56.714689 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="52d66af3-ac8c-4ea4-9994-eeeaa65513cc" path="/var/lib/kubelet/pods/52d66af3-ac8c-4ea4-9994-eeeaa65513cc/volumes" Jan 31 16:32:57 crc kubenswrapper[4769]: I0131 16:32:57.353420 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gzw85"] Jan 31 16:32:57 crc kubenswrapper[4769]: I0131 16:32:57.353893 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-gzw85" podUID="867ca138-e999-4977-a9f7-f9da159c3515" containerName="registry-server" containerID="cri-o://50ae420b825ce0bc823738638c3343e2f984bc07bca115e37d896807d8294117" gracePeriod=2 Jan 31 16:32:57 crc kubenswrapper[4769]: I0131 16:32:57.778575 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-gzw85" Jan 31 16:32:57 crc kubenswrapper[4769]: I0131 16:32:57.954075 4769 generic.go:334] "Generic (PLEG): container finished" podID="867ca138-e999-4977-a9f7-f9da159c3515" containerID="50ae420b825ce0bc823738638c3343e2f984bc07bca115e37d896807d8294117" exitCode=0 Jan 31 16:32:57 crc kubenswrapper[4769]: I0131 16:32:57.954129 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gzw85" event={"ID":"867ca138-e999-4977-a9f7-f9da159c3515","Type":"ContainerDied","Data":"50ae420b825ce0bc823738638c3343e2f984bc07bca115e37d896807d8294117"} Jan 31 16:32:57 crc kubenswrapper[4769]: I0131 16:32:57.954165 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gzw85" event={"ID":"867ca138-e999-4977-a9f7-f9da159c3515","Type":"ContainerDied","Data":"0393722840dcb84d1e63fda89cd197930b0743b8d0b13f204bba5675c0d1cd1c"} Jan 31 16:32:57 crc kubenswrapper[4769]: I0131 16:32:57.954190 4769 scope.go:117] "RemoveContainer" containerID="50ae420b825ce0bc823738638c3343e2f984bc07bca115e37d896807d8294117" Jan 31 16:32:57 crc kubenswrapper[4769]: I0131 16:32:57.954238 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gzw85" Jan 31 16:32:57 crc kubenswrapper[4769]: I0131 16:32:57.974971 4769 scope.go:117] "RemoveContainer" containerID="611ce905c7c655e593b197b376f4f20897058135401ace52cfa11ec238301152" Jan 31 16:32:57 crc kubenswrapper[4769]: I0131 16:32:57.976416 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/867ca138-e999-4977-a9f7-f9da159c3515-catalog-content\") pod \"867ca138-e999-4977-a9f7-f9da159c3515\" (UID: \"867ca138-e999-4977-a9f7-f9da159c3515\") " Jan 31 16:32:57 crc kubenswrapper[4769]: I0131 16:32:57.976473 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tv7tp\" (UniqueName: \"kubernetes.io/projected/867ca138-e999-4977-a9f7-f9da159c3515-kube-api-access-tv7tp\") pod \"867ca138-e999-4977-a9f7-f9da159c3515\" (UID: \"867ca138-e999-4977-a9f7-f9da159c3515\") " Jan 31 16:32:57 crc kubenswrapper[4769]: I0131 16:32:57.976556 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/867ca138-e999-4977-a9f7-f9da159c3515-utilities\") pod \"867ca138-e999-4977-a9f7-f9da159c3515\" (UID: \"867ca138-e999-4977-a9f7-f9da159c3515\") " Jan 31 16:32:57 crc kubenswrapper[4769]: I0131 16:32:57.978049 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/867ca138-e999-4977-a9f7-f9da159c3515-utilities" (OuterVolumeSpecName: "utilities") pod "867ca138-e999-4977-a9f7-f9da159c3515" (UID: "867ca138-e999-4977-a9f7-f9da159c3515"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:32:57 crc kubenswrapper[4769]: I0131 16:32:57.985035 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/867ca138-e999-4977-a9f7-f9da159c3515-kube-api-access-tv7tp" (OuterVolumeSpecName: "kube-api-access-tv7tp") pod "867ca138-e999-4977-a9f7-f9da159c3515" (UID: "867ca138-e999-4977-a9f7-f9da159c3515"). InnerVolumeSpecName "kube-api-access-tv7tp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:32:57 crc kubenswrapper[4769]: I0131 16:32:57.992135 4769 scope.go:117] "RemoveContainer" containerID="fd3b21ffa26f67b360a04c391fb71ae8c38ecc5f30dd84f4cd723c2201cb2eaa" Jan 31 16:32:58 crc kubenswrapper[4769]: I0131 16:32:58.035714 4769 scope.go:117] "RemoveContainer" containerID="50ae420b825ce0bc823738638c3343e2f984bc07bca115e37d896807d8294117" Jan 31 16:32:58 crc kubenswrapper[4769]: E0131 16:32:58.036265 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"50ae420b825ce0bc823738638c3343e2f984bc07bca115e37d896807d8294117\": container with ID starting with 50ae420b825ce0bc823738638c3343e2f984bc07bca115e37d896807d8294117 not found: ID does not exist" containerID="50ae420b825ce0bc823738638c3343e2f984bc07bca115e37d896807d8294117" Jan 31 16:32:58 crc kubenswrapper[4769]: I0131 16:32:58.036333 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"50ae420b825ce0bc823738638c3343e2f984bc07bca115e37d896807d8294117"} err="failed to get container status \"50ae420b825ce0bc823738638c3343e2f984bc07bca115e37d896807d8294117\": rpc error: code = NotFound desc = could not find container \"50ae420b825ce0bc823738638c3343e2f984bc07bca115e37d896807d8294117\": container with ID starting with 50ae420b825ce0bc823738638c3343e2f984bc07bca115e37d896807d8294117 not found: ID does not exist" Jan 31 16:32:58 crc kubenswrapper[4769]: I0131 16:32:58.036416 4769 scope.go:117] "RemoveContainer" containerID="611ce905c7c655e593b197b376f4f20897058135401ace52cfa11ec238301152" Jan 31 16:32:58 crc kubenswrapper[4769]: E0131 16:32:58.036873 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"611ce905c7c655e593b197b376f4f20897058135401ace52cfa11ec238301152\": container with ID starting with 611ce905c7c655e593b197b376f4f20897058135401ace52cfa11ec238301152 not found: ID does not exist" containerID="611ce905c7c655e593b197b376f4f20897058135401ace52cfa11ec238301152" Jan 31 16:32:58 crc kubenswrapper[4769]: I0131 16:32:58.036908 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"611ce905c7c655e593b197b376f4f20897058135401ace52cfa11ec238301152"} err="failed to get container status \"611ce905c7c655e593b197b376f4f20897058135401ace52cfa11ec238301152\": rpc error: code = NotFound desc = could not find container \"611ce905c7c655e593b197b376f4f20897058135401ace52cfa11ec238301152\": container with ID starting with 611ce905c7c655e593b197b376f4f20897058135401ace52cfa11ec238301152 not found: ID does not exist" Jan 31 16:32:58 crc kubenswrapper[4769]: I0131 16:32:58.036933 4769 scope.go:117] "RemoveContainer" containerID="fd3b21ffa26f67b360a04c391fb71ae8c38ecc5f30dd84f4cd723c2201cb2eaa" Jan 31 16:32:58 crc kubenswrapper[4769]: E0131 16:32:58.037353 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fd3b21ffa26f67b360a04c391fb71ae8c38ecc5f30dd84f4cd723c2201cb2eaa\": container with ID starting with fd3b21ffa26f67b360a04c391fb71ae8c38ecc5f30dd84f4cd723c2201cb2eaa not found: ID does not exist" containerID="fd3b21ffa26f67b360a04c391fb71ae8c38ecc5f30dd84f4cd723c2201cb2eaa" Jan 31 16:32:58 crc kubenswrapper[4769]: I0131 16:32:58.037408 4769 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"fd3b21ffa26f67b360a04c391fb71ae8c38ecc5f30dd84f4cd723c2201cb2eaa"} err="failed to get container status \"fd3b21ffa26f67b360a04c391fb71ae8c38ecc5f30dd84f4cd723c2201cb2eaa\": rpc error: code = NotFound desc = could not find container \"fd3b21ffa26f67b360a04c391fb71ae8c38ecc5f30dd84f4cd723c2201cb2eaa\": container with ID starting with fd3b21ffa26f67b360a04c391fb71ae8c38ecc5f30dd84f4cd723c2201cb2eaa not found: ID does not exist" Jan 31 16:32:58 crc kubenswrapper[4769]: I0131 16:32:58.078629 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tv7tp\" (UniqueName: \"kubernetes.io/projected/867ca138-e999-4977-a9f7-f9da159c3515-kube-api-access-tv7tp\") on node \"crc\" DevicePath \"\"" Jan 31 16:32:58 crc kubenswrapper[4769]: I0131 16:32:58.078667 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/867ca138-e999-4977-a9f7-f9da159c3515-utilities\") on node \"crc\" DevicePath \"\"" Jan 31 16:32:58 crc kubenswrapper[4769]: I0131 16:32:58.118926 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/867ca138-e999-4977-a9f7-f9da159c3515-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "867ca138-e999-4977-a9f7-f9da159c3515" (UID: "867ca138-e999-4977-a9f7-f9da159c3515"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:32:58 crc kubenswrapper[4769]: I0131 16:32:58.179880 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/867ca138-e999-4977-a9f7-f9da159c3515-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 31 16:32:58 crc kubenswrapper[4769]: I0131 16:32:58.286432 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gzw85"] Jan 31 16:32:58 crc kubenswrapper[4769]: I0131 16:32:58.293729 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-gzw85"] Jan 31 16:32:58 crc kubenswrapper[4769]: I0131 16:32:58.716307 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="867ca138-e999-4977-a9f7-f9da159c3515" path="/var/lib/kubelet/pods/867ca138-e999-4977-a9f7-f9da159c3515/volumes" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.388707 4769 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 31 16:33:11 crc kubenswrapper[4769]: E0131 16:33:11.389268 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0abe8daf-8786-4b00-b488-d96f19efe5ba" containerName="extract-utilities" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.389284 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="0abe8daf-8786-4b00-b488-d96f19efe5ba" containerName="extract-utilities" Jan 31 16:33:11 crc kubenswrapper[4769]: E0131 16:33:11.389302 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0abe8daf-8786-4b00-b488-d96f19efe5ba" containerName="extract-content" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.389311 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="0abe8daf-8786-4b00-b488-d96f19efe5ba" containerName="extract-content" Jan 31 16:33:11 crc kubenswrapper[4769]: E0131 16:33:11.389325 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d149dc3-950d-4f91-b78f-d4469a197742" containerName="registry-server" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 
16:33:11.389335 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d149dc3-950d-4f91-b78f-d4469a197742" containerName="registry-server" Jan 31 16:33:11 crc kubenswrapper[4769]: E0131 16:33:11.389345 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="867ca138-e999-4977-a9f7-f9da159c3515" containerName="extract-utilities" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.389355 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="867ca138-e999-4977-a9f7-f9da159c3515" containerName="extract-utilities" Jan 31 16:33:11 crc kubenswrapper[4769]: E0131 16:33:11.389363 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52d66af3-ac8c-4ea4-9994-eeeaa65513cc" containerName="extract-content" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.389371 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="52d66af3-ac8c-4ea4-9994-eeeaa65513cc" containerName="extract-content" Jan 31 16:33:11 crc kubenswrapper[4769]: E0131 16:33:11.389383 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d149dc3-950d-4f91-b78f-d4469a197742" containerName="extract-content" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.389391 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d149dc3-950d-4f91-b78f-d4469a197742" containerName="extract-content" Jan 31 16:33:11 crc kubenswrapper[4769]: E0131 16:33:11.389406 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52d66af3-ac8c-4ea4-9994-eeeaa65513cc" containerName="registry-server" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.389414 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="52d66af3-ac8c-4ea4-9994-eeeaa65513cc" containerName="registry-server" Jan 31 16:33:11 crc kubenswrapper[4769]: E0131 16:33:11.389424 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="867ca138-e999-4977-a9f7-f9da159c3515" containerName="extract-content" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.389432 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="867ca138-e999-4977-a9f7-f9da159c3515" containerName="extract-content" Jan 31 16:33:11 crc kubenswrapper[4769]: E0131 16:33:11.389447 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="867ca138-e999-4977-a9f7-f9da159c3515" containerName="registry-server" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.389455 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="867ca138-e999-4977-a9f7-f9da159c3515" containerName="registry-server" Jan 31 16:33:11 crc kubenswrapper[4769]: E0131 16:33:11.389469 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d149dc3-950d-4f91-b78f-d4469a197742" containerName="extract-utilities" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.389477 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d149dc3-950d-4f91-b78f-d4469a197742" containerName="extract-utilities" Jan 31 16:33:11 crc kubenswrapper[4769]: E0131 16:33:11.389510 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0abe8daf-8786-4b00-b488-d96f19efe5ba" containerName="registry-server" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.389519 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="0abe8daf-8786-4b00-b488-d96f19efe5ba" containerName="registry-server" Jan 31 16:33:11 crc kubenswrapper[4769]: E0131 16:33:11.389531 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52d66af3-ac8c-4ea4-9994-eeeaa65513cc" containerName="extract-utilities" Jan 31 16:33:11 
crc kubenswrapper[4769]: I0131 16:33:11.389541 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="52d66af3-ac8c-4ea4-9994-eeeaa65513cc" containerName="extract-utilities" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.389666 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="0abe8daf-8786-4b00-b488-d96f19efe5ba" containerName="registry-server" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.389680 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d149dc3-950d-4f91-b78f-d4469a197742" containerName="registry-server" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.389691 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="867ca138-e999-4977-a9f7-f9da159c3515" containerName="registry-server" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.389704 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="52d66af3-ac8c-4ea4-9994-eeeaa65513cc" containerName="registry-server" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.390056 4769 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.390241 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.390483 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273" gracePeriod=15 Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.390541 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc" gracePeriod=15 Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.390605 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf" gracePeriod=15 Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.390673 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef" gracePeriod=15 Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.390733 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0" gracePeriod=15 Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.390988 4769 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 31 16:33:11 crc kubenswrapper[4769]: E0131 16:33:11.391173 4769 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.391185 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 31 16:33:11 crc kubenswrapper[4769]: E0131 16:33:11.391199 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.391209 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 31 16:33:11 crc kubenswrapper[4769]: E0131 16:33:11.391220 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.391255 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 31 16:33:11 crc kubenswrapper[4769]: E0131 16:33:11.391268 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.391277 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 31 16:33:11 crc kubenswrapper[4769]: E0131 16:33:11.391295 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.391304 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 31 16:33:11 crc kubenswrapper[4769]: E0131 16:33:11.391343 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.391352 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 31 16:33:11 crc kubenswrapper[4769]: E0131 16:33:11.391366 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.391374 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.391555 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.391570 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.391613 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.391628 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" 
containerName="kube-apiserver" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.391640 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.391649 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.470119 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.470401 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.470437 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.470603 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.470665 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.470701 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.470771 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.470812 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: 
\"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.571757 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.571831 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.571924 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.571994 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.572022 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.572116 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.572052 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.572169 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.572181 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: 
\"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.572201 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.572238 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.572244 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.572294 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.572304 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.572341 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.572350 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.645645 4769 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Jan 31 16:33:11 crc kubenswrapper[4769]: I0131 16:33:11.645723 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: 
connection refused" Jan 31 16:33:12 crc kubenswrapper[4769]: I0131 16:33:12.032831 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 31 16:33:12 crc kubenswrapper[4769]: I0131 16:33:12.034238 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 31 16:33:12 crc kubenswrapper[4769]: I0131 16:33:12.034752 4769 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273" exitCode=0 Jan 31 16:33:12 crc kubenswrapper[4769]: I0131 16:33:12.034774 4769 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef" exitCode=0 Jan 31 16:33:12 crc kubenswrapper[4769]: I0131 16:33:12.034782 4769 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc" exitCode=0 Jan 31 16:33:12 crc kubenswrapper[4769]: I0131 16:33:12.034789 4769 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf" exitCode=2 Jan 31 16:33:12 crc kubenswrapper[4769]: I0131 16:33:12.034841 4769 scope.go:117] "RemoveContainer" containerID="d0edc64ad9b20c3dcdb6fe9eb8c5d2caa46542678bdde95e3af643ee8ab747c0" Jan 31 16:33:12 crc kubenswrapper[4769]: I0131 16:33:12.038319 4769 generic.go:334] "Generic (PLEG): container finished" podID="dcb1a34a-c9b8-4dda-883f-f5b772ed1159" containerID="8efadf446faee693c2c7294623d8c902c4e57c29981148dce0c24187850e5b17" exitCode=0 Jan 31 16:33:12 crc kubenswrapper[4769]: I0131 16:33:12.038359 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"dcb1a34a-c9b8-4dda-883f-f5b772ed1159","Type":"ContainerDied","Data":"8efadf446faee693c2c7294623d8c902c4e57c29981148dce0c24187850e5b17"} Jan 31 16:33:12 crc kubenswrapper[4769]: I0131 16:33:12.039236 4769 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.22:6443: connect: connection refused" Jan 31 16:33:12 crc kubenswrapper[4769]: I0131 16:33:12.039583 4769 status_manager.go:851] "Failed to get status for pod" podUID="dcb1a34a-c9b8-4dda-883f-f5b772ed1159" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.22:6443: connect: connection refused" Jan 31 16:33:12 crc kubenswrapper[4769]: I0131 16:33:12.362330 4769 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:6443/readyz\": dial tcp 192.168.126.11:6443: connect: connection refused" start-of-body= Jan 31 16:33:12 crc kubenswrapper[4769]: I0131 16:33:12.362405 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" 
podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="Get \"https://192.168.126.11:6443/readyz\": dial tcp 192.168.126.11:6443: connect: connection refused" Jan 31 16:33:12 crc kubenswrapper[4769]: I0131 16:33:12.710223 4769 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.22:6443: connect: connection refused" Jan 31 16:33:12 crc kubenswrapper[4769]: I0131 16:33:12.710605 4769 status_manager.go:851] "Failed to get status for pod" podUID="dcb1a34a-c9b8-4dda-883f-f5b772ed1159" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.22:6443: connect: connection refused" Jan 31 16:33:13 crc kubenswrapper[4769]: I0131 16:33:13.047763 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 31 16:33:13 crc kubenswrapper[4769]: E0131 16:33:13.281006 4769 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.22:6443: connect: connection refused" Jan 31 16:33:13 crc kubenswrapper[4769]: E0131 16:33:13.281286 4769 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.22:6443: connect: connection refused" Jan 31 16:33:13 crc kubenswrapper[4769]: E0131 16:33:13.281551 4769 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.22:6443: connect: connection refused" Jan 31 16:33:13 crc kubenswrapper[4769]: E0131 16:33:13.281882 4769 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.22:6443: connect: connection refused" Jan 31 16:33:13 crc kubenswrapper[4769]: E0131 16:33:13.282110 4769 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.22:6443: connect: connection refused" Jan 31 16:33:13 crc kubenswrapper[4769]: I0131 16:33:13.282137 4769 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Jan 31 16:33:13 crc kubenswrapper[4769]: E0131 16:33:13.282517 4769 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.22:6443: connect: connection refused" interval="200ms" Jan 31 16:33:13 crc kubenswrapper[4769]: I0131 16:33:13.348129 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 31 16:33:13 crc kubenswrapper[4769]: I0131 16:33:13.348699 4769 status_manager.go:851] "Failed to get status for pod" podUID="dcb1a34a-c9b8-4dda-883f-f5b772ed1159" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.22:6443: connect: connection refused" Jan 31 16:33:13 crc kubenswrapper[4769]: I0131 16:33:13.399000 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/dcb1a34a-c9b8-4dda-883f-f5b772ed1159-var-lock\") pod \"dcb1a34a-c9b8-4dda-883f-f5b772ed1159\" (UID: \"dcb1a34a-c9b8-4dda-883f-f5b772ed1159\") " Jan 31 16:33:13 crc kubenswrapper[4769]: I0131 16:33:13.399080 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/dcb1a34a-c9b8-4dda-883f-f5b772ed1159-var-lock" (OuterVolumeSpecName: "var-lock") pod "dcb1a34a-c9b8-4dda-883f-f5b772ed1159" (UID: "dcb1a34a-c9b8-4dda-883f-f5b772ed1159"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 31 16:33:13 crc kubenswrapper[4769]: I0131 16:33:13.399112 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/dcb1a34a-c9b8-4dda-883f-f5b772ed1159-kubelet-dir\") pod \"dcb1a34a-c9b8-4dda-883f-f5b772ed1159\" (UID: \"dcb1a34a-c9b8-4dda-883f-f5b772ed1159\") " Jan 31 16:33:13 crc kubenswrapper[4769]: I0131 16:33:13.399149 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/dcb1a34a-c9b8-4dda-883f-f5b772ed1159-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "dcb1a34a-c9b8-4dda-883f-f5b772ed1159" (UID: "dcb1a34a-c9b8-4dda-883f-f5b772ed1159"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 31 16:33:13 crc kubenswrapper[4769]: I0131 16:33:13.399309 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/dcb1a34a-c9b8-4dda-883f-f5b772ed1159-kube-api-access\") pod \"dcb1a34a-c9b8-4dda-883f-f5b772ed1159\" (UID: \"dcb1a34a-c9b8-4dda-883f-f5b772ed1159\") " Jan 31 16:33:13 crc kubenswrapper[4769]: I0131 16:33:13.399702 4769 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/dcb1a34a-c9b8-4dda-883f-f5b772ed1159-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 31 16:33:13 crc kubenswrapper[4769]: I0131 16:33:13.399721 4769 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/dcb1a34a-c9b8-4dda-883f-f5b772ed1159-var-lock\") on node \"crc\" DevicePath \"\"" Jan 31 16:33:13 crc kubenswrapper[4769]: I0131 16:33:13.407027 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dcb1a34a-c9b8-4dda-883f-f5b772ed1159-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "dcb1a34a-c9b8-4dda-883f-f5b772ed1159" (UID: "dcb1a34a-c9b8-4dda-883f-f5b772ed1159"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:33:13 crc kubenswrapper[4769]: E0131 16:33:13.489318 4769 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.22:6443: connect: connection refused" interval="400ms" Jan 31 16:33:13 crc kubenswrapper[4769]: I0131 16:33:13.505063 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/dcb1a34a-c9b8-4dda-883f-f5b772ed1159-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 31 16:33:13 crc kubenswrapper[4769]: I0131 16:33:13.755383 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 31 16:33:13 crc kubenswrapper[4769]: I0131 16:33:13.756041 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 31 16:33:13 crc kubenswrapper[4769]: I0131 16:33:13.756552 4769 status_manager.go:851] "Failed to get status for pod" podUID="dcb1a34a-c9b8-4dda-883f-f5b772ed1159" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.22:6443: connect: connection refused" Jan 31 16:33:13 crc kubenswrapper[4769]: I0131 16:33:13.756883 4769 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.22:6443: connect: connection refused" Jan 31 16:33:13 crc kubenswrapper[4769]: I0131 16:33:13.807845 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 31 16:33:13 crc kubenswrapper[4769]: I0131 16:33:13.807931 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 31 16:33:13 crc kubenswrapper[4769]: I0131 16:33:13.807970 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 31 16:33:13 crc kubenswrapper[4769]: I0131 16:33:13.807995 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 31 16:33:13 crc kubenswrapper[4769]: I0131 16:33:13.807970 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 31 16:33:13 crc kubenswrapper[4769]: I0131 16:33:13.808055 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 31 16:33:13 crc kubenswrapper[4769]: I0131 16:33:13.808470 4769 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 31 16:33:13 crc kubenswrapper[4769]: I0131 16:33:13.808483 4769 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Jan 31 16:33:13 crc kubenswrapper[4769]: I0131 16:33:13.808531 4769 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Jan 31 16:33:13 crc kubenswrapper[4769]: E0131 16:33:13.890447 4769 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.22:6443: connect: connection refused" interval="800ms" Jan 31 16:33:14 crc kubenswrapper[4769]: I0131 16:33:14.054323 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"dcb1a34a-c9b8-4dda-883f-f5b772ed1159","Type":"ContainerDied","Data":"df387f9f606a8acfccd1ed16dfa2b0c6b5ca1e607536c254446576c3b549d5b4"} Jan 31 16:33:14 crc kubenswrapper[4769]: I0131 16:33:14.054366 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="df387f9f606a8acfccd1ed16dfa2b0c6b5ca1e607536c254446576c3b549d5b4" Jan 31 16:33:14 crc kubenswrapper[4769]: I0131 16:33:14.054408 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 31 16:33:14 crc kubenswrapper[4769]: I0131 16:33:14.059255 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 31 16:33:14 crc kubenswrapper[4769]: I0131 16:33:14.060416 4769 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0" exitCode=0 Jan 31 16:33:14 crc kubenswrapper[4769]: I0131 16:33:14.060464 4769 scope.go:117] "RemoveContainer" containerID="ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273" Jan 31 16:33:14 crc kubenswrapper[4769]: I0131 16:33:14.060618 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 31 16:33:14 crc kubenswrapper[4769]: I0131 16:33:14.066989 4769 status_manager.go:851] "Failed to get status for pod" podUID="dcb1a34a-c9b8-4dda-883f-f5b772ed1159" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.22:6443: connect: connection refused" Jan 31 16:33:14 crc kubenswrapper[4769]: I0131 16:33:14.067556 4769 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.22:6443: connect: connection refused" Jan 31 16:33:14 crc kubenswrapper[4769]: I0131 16:33:14.075132 4769 scope.go:117] "RemoveContainer" containerID="40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef" Jan 31 16:33:14 crc kubenswrapper[4769]: I0131 16:33:14.087842 4769 status_manager.go:851] "Failed to get status for pod" podUID="dcb1a34a-c9b8-4dda-883f-f5b772ed1159" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.22:6443: connect: connection refused" Jan 31 16:33:14 crc kubenswrapper[4769]: I0131 16:33:14.088293 4769 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.22:6443: connect: connection refused" Jan 31 16:33:14 crc kubenswrapper[4769]: I0131 16:33:14.092490 4769 scope.go:117] "RemoveContainer" containerID="f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc" Jan 31 16:33:14 crc kubenswrapper[4769]: I0131 16:33:14.104669 4769 scope.go:117] "RemoveContainer" containerID="7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf" Jan 31 16:33:14 crc kubenswrapper[4769]: I0131 16:33:14.119765 4769 scope.go:117] "RemoveContainer" containerID="f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0" Jan 31 16:33:14 crc kubenswrapper[4769]: I0131 16:33:14.135726 4769 scope.go:117] "RemoveContainer" containerID="bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a" Jan 31 16:33:14 crc kubenswrapper[4769]: I0131 16:33:14.154344 4769 scope.go:117] "RemoveContainer" containerID="ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273" Jan 31 
16:33:14 crc kubenswrapper[4769]: E0131 16:33:14.154789 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273\": container with ID starting with ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273 not found: ID does not exist" containerID="ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273" Jan 31 16:33:14 crc kubenswrapper[4769]: I0131 16:33:14.154839 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273"} err="failed to get container status \"ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273\": rpc error: code = NotFound desc = could not find container \"ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273\": container with ID starting with ea05dfd88d830d7c377d77364401b0aa38eed179d29479f084a174042ad29273 not found: ID does not exist" Jan 31 16:33:14 crc kubenswrapper[4769]: I0131 16:33:14.154874 4769 scope.go:117] "RemoveContainer" containerID="40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef" Jan 31 16:33:14 crc kubenswrapper[4769]: E0131 16:33:14.155204 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\": container with ID starting with 40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef not found: ID does not exist" containerID="40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef" Jan 31 16:33:14 crc kubenswrapper[4769]: I0131 16:33:14.155243 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef"} err="failed to get container status \"40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\": rpc error: code = NotFound desc = could not find container \"40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef\": container with ID starting with 40101dd77a07020a7e59d552ff5dd5be1feab2dd46efe7704af10440496911ef not found: ID does not exist" Jan 31 16:33:14 crc kubenswrapper[4769]: I0131 16:33:14.155267 4769 scope.go:117] "RemoveContainer" containerID="f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc" Jan 31 16:33:14 crc kubenswrapper[4769]: E0131 16:33:14.155566 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\": container with ID starting with f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc not found: ID does not exist" containerID="f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc" Jan 31 16:33:14 crc kubenswrapper[4769]: I0131 16:33:14.155603 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc"} err="failed to get container status \"f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\": rpc error: code = NotFound desc = could not find container \"f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc\": container with ID starting with f1b9ef3d7eac93ae752bbddd48dff9cf07b72156394076877a8fbcee91be70cc not found: ID does not exist" Jan 31 16:33:14 crc 
kubenswrapper[4769]: I0131 16:33:14.155627 4769 scope.go:117] "RemoveContainer" containerID="7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf" Jan 31 16:33:14 crc kubenswrapper[4769]: E0131 16:33:14.156221 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\": container with ID starting with 7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf not found: ID does not exist" containerID="7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf" Jan 31 16:33:14 crc kubenswrapper[4769]: I0131 16:33:14.156325 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf"} err="failed to get container status \"7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\": rpc error: code = NotFound desc = could not find container \"7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf\": container with ID starting with 7987317e525da87e3566d06ce3af3358d0655fb0b6623887c581b7f531c984cf not found: ID does not exist" Jan 31 16:33:14 crc kubenswrapper[4769]: I0131 16:33:14.156423 4769 scope.go:117] "RemoveContainer" containerID="f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0" Jan 31 16:33:14 crc kubenswrapper[4769]: E0131 16:33:14.158654 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\": container with ID starting with f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0 not found: ID does not exist" containerID="f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0" Jan 31 16:33:14 crc kubenswrapper[4769]: I0131 16:33:14.158702 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0"} err="failed to get container status \"f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\": rpc error: code = NotFound desc = could not find container \"f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0\": container with ID starting with f8689f435d75e38fb0c6f0b0d3b1dc7629004f6783dfd73822f01aec17223ef0 not found: ID does not exist" Jan 31 16:33:14 crc kubenswrapper[4769]: I0131 16:33:14.158729 4769 scope.go:117] "RemoveContainer" containerID="bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a" Jan 31 16:33:14 crc kubenswrapper[4769]: E0131 16:33:14.159067 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\": container with ID starting with bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a not found: ID does not exist" containerID="bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a" Jan 31 16:33:14 crc kubenswrapper[4769]: I0131 16:33:14.159111 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a"} err="failed to get container status \"bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\": rpc error: code = NotFound desc = could not find container 
\"bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a\": container with ID starting with bed588eaf95d020fa6c6e26937a926625ff475322b83fac74978e87b4698f74a not found: ID does not exist" Jan 31 16:33:14 crc kubenswrapper[4769]: E0131 16:33:14.691577 4769 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.22:6443: connect: connection refused" interval="1.6s" Jan 31 16:33:14 crc kubenswrapper[4769]: I0131 16:33:14.713924 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Jan 31 16:33:15 crc kubenswrapper[4769]: E0131 16:33:15.808114 4769 desired_state_of_world_populator.go:312] "Error processing volume" err="error processing PVC openshift-image-registry/crc-image-registry-storage: failed to fetch PVC from API server: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/persistentvolumeclaims/crc-image-registry-storage\": dial tcp 38.102.83.22:6443: connect: connection refused" pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" volumeName="registry-storage" Jan 31 16:33:16 crc kubenswrapper[4769]: E0131 16:33:16.292560 4769 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.22:6443: connect: connection refused" interval="3.2s" Jan 31 16:33:16 crc kubenswrapper[4769]: E0131 16:33:16.439375 4769 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.22:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 31 16:33:16 crc kubenswrapper[4769]: I0131 16:33:16.440141 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 31 16:33:16 crc kubenswrapper[4769]: W0131 16:33:16.470855 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf85e55b1a89d02b0cb034b1ea31ed45a.slice/crio-49c9285d57d9341fd6964844e235577001d83957f2aa61fceac959b631469bde WatchSource:0}: Error finding container 49c9285d57d9341fd6964844e235577001d83957f2aa61fceac959b631469bde: Status 404 returned error can't find the container with id 49c9285d57d9341fd6964844e235577001d83957f2aa61fceac959b631469bde Jan 31 16:33:16 crc kubenswrapper[4769]: E0131 16:33:16.477839 4769 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.22:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.188fddeed7fec6c3 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-31 16:33:16.475647683 +0000 UTC m=+244.549816382,LastTimestamp:2026-01-31 16:33:16.475647683 +0000 UTC m=+244.549816382,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 31 16:33:17 crc kubenswrapper[4769]: I0131 16:33:17.081274 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"966481c8e8a6a266ceacf7b005c1afc334c4ef8c90be4680282ce5b97cc3f00e"} Jan 31 16:33:17 crc kubenswrapper[4769]: I0131 16:33:17.081585 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"49c9285d57d9341fd6964844e235577001d83957f2aa61fceac959b631469bde"} Jan 31 16:33:17 crc kubenswrapper[4769]: I0131 16:33:17.082362 4769 status_manager.go:851] "Failed to get status for pod" podUID="dcb1a34a-c9b8-4dda-883f-f5b772ed1159" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.22:6443: connect: connection refused" Jan 31 16:33:17 crc kubenswrapper[4769]: E0131 16:33:17.082525 4769 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.22:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 31 16:33:18 crc kubenswrapper[4769]: E0131 16:33:18.674613 4769 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.22:6443: connect: connection refused" 
event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.188fddeed7fec6c3 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-31 16:33:16.475647683 +0000 UTC m=+244.549816382,LastTimestamp:2026-01-31 16:33:16.475647683 +0000 UTC m=+244.549816382,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 31 16:33:18 crc kubenswrapper[4769]: I0131 16:33:18.925366 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" podUID="37adecb9-a5fd-4e61-869b-4a04ac424ac0" containerName="oauth-openshift" containerID="cri-o://aa088c60e088a9f8c3a9899946bf609f1c5250a4c20484ebea483b036ac25038" gracePeriod=15 Jan 31 16:33:19 crc kubenswrapper[4769]: I0131 16:33:19.093126 4769 generic.go:334] "Generic (PLEG): container finished" podID="37adecb9-a5fd-4e61-869b-4a04ac424ac0" containerID="aa088c60e088a9f8c3a9899946bf609f1c5250a4c20484ebea483b036ac25038" exitCode=0 Jan 31 16:33:19 crc kubenswrapper[4769]: I0131 16:33:19.093239 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" event={"ID":"37adecb9-a5fd-4e61-869b-4a04ac424ac0","Type":"ContainerDied","Data":"aa088c60e088a9f8c3a9899946bf609f1c5250a4c20484ebea483b036ac25038"} Jan 31 16:33:19 crc kubenswrapper[4769]: I0131 16:33:19.362173 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:33:19 crc kubenswrapper[4769]: I0131 16:33:19.362860 4769 status_manager.go:851] "Failed to get status for pod" podUID="37adecb9-a5fd-4e61-869b-4a04ac424ac0" pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-h8nkx\": dial tcp 38.102.83.22:6443: connect: connection refused" Jan 31 16:33:19 crc kubenswrapper[4769]: I0131 16:33:19.363113 4769 status_manager.go:851] "Failed to get status for pod" podUID="dcb1a34a-c9b8-4dda-883f-f5b772ed1159" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.22:6443: connect: connection refused" Jan 31 16:33:19 crc kubenswrapper[4769]: I0131 16:33:19.484630 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xxnwg\" (UniqueName: \"kubernetes.io/projected/37adecb9-a5fd-4e61-869b-4a04ac424ac0-kube-api-access-xxnwg\") pod \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " Jan 31 16:33:19 crc kubenswrapper[4769]: I0131 16:33:19.484717 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-system-service-ca\") pod \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " Jan 31 16:33:19 crc kubenswrapper[4769]: I0131 16:33:19.484777 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-user-idp-0-file-data\") pod \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " Jan 31 16:33:19 crc kubenswrapper[4769]: I0131 16:33:19.484812 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/37adecb9-a5fd-4e61-869b-4a04ac424ac0-audit-policies\") pod \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " Jan 31 16:33:19 crc kubenswrapper[4769]: I0131 16:33:19.484853 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-system-router-certs\") pod \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " Jan 31 16:33:19 crc kubenswrapper[4769]: I0131 16:33:19.484887 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-system-cliconfig\") pod \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " Jan 31 16:33:19 crc kubenswrapper[4769]: I0131 16:33:19.484935 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-user-template-login\") pod \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " Jan 31 16:33:19 crc 
kubenswrapper[4769]: I0131 16:33:19.484969 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-system-trusted-ca-bundle\") pod \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " Jan 31 16:33:19 crc kubenswrapper[4769]: I0131 16:33:19.485079 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-system-serving-cert\") pod \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " Jan 31 16:33:19 crc kubenswrapper[4769]: I0131 16:33:19.485128 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-user-template-provider-selection\") pod \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " Jan 31 16:33:19 crc kubenswrapper[4769]: I0131 16:33:19.485181 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-system-session\") pod \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " Jan 31 16:33:19 crc kubenswrapper[4769]: I0131 16:33:19.485451 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/37adecb9-a5fd-4e61-869b-4a04ac424ac0-audit-dir\") pod \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " Jan 31 16:33:19 crc kubenswrapper[4769]: I0131 16:33:19.485482 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/37adecb9-a5fd-4e61-869b-4a04ac424ac0-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "37adecb9-a5fd-4e61-869b-4a04ac424ac0" (UID: "37adecb9-a5fd-4e61-869b-4a04ac424ac0"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 31 16:33:19 crc kubenswrapper[4769]: I0131 16:33:19.485611 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-user-template-error\") pod \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " Jan 31 16:33:19 crc kubenswrapper[4769]: I0131 16:33:19.485676 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-system-ocp-branding-template\") pod \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\" (UID: \"37adecb9-a5fd-4e61-869b-4a04ac424ac0\") " Jan 31 16:33:19 crc kubenswrapper[4769]: I0131 16:33:19.485978 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "37adecb9-a5fd-4e61-869b-4a04ac424ac0" (UID: "37adecb9-a5fd-4e61-869b-4a04ac424ac0"). InnerVolumeSpecName "v4-0-config-system-cliconfig". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:33:19 crc kubenswrapper[4769]: I0131 16:33:19.485999 4769 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/37adecb9-a5fd-4e61-869b-4a04ac424ac0-audit-dir\") on node \"crc\" DevicePath \"\"" Jan 31 16:33:19 crc kubenswrapper[4769]: I0131 16:33:19.486081 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/37adecb9-a5fd-4e61-869b-4a04ac424ac0-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "37adecb9-a5fd-4e61-869b-4a04ac424ac0" (UID: "37adecb9-a5fd-4e61-869b-4a04ac424ac0"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:33:19 crc kubenswrapper[4769]: I0131 16:33:19.486646 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "37adecb9-a5fd-4e61-869b-4a04ac424ac0" (UID: "37adecb9-a5fd-4e61-869b-4a04ac424ac0"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:33:19 crc kubenswrapper[4769]: I0131 16:33:19.486771 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "37adecb9-a5fd-4e61-869b-4a04ac424ac0" (UID: "37adecb9-a5fd-4e61-869b-4a04ac424ac0"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:33:19 crc kubenswrapper[4769]: I0131 16:33:19.490778 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "37adecb9-a5fd-4e61-869b-4a04ac424ac0" (UID: "37adecb9-a5fd-4e61-869b-4a04ac424ac0"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:33:19 crc kubenswrapper[4769]: I0131 16:33:19.491485 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "37adecb9-a5fd-4e61-869b-4a04ac424ac0" (UID: "37adecb9-a5fd-4e61-869b-4a04ac424ac0"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:33:19 crc kubenswrapper[4769]: I0131 16:33:19.492101 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "37adecb9-a5fd-4e61-869b-4a04ac424ac0" (UID: "37adecb9-a5fd-4e61-869b-4a04ac424ac0"). InnerVolumeSpecName "v4-0-config-system-session". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:33:19 crc kubenswrapper[4769]: I0131 16:33:19.492189 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/37adecb9-a5fd-4e61-869b-4a04ac424ac0-kube-api-access-xxnwg" (OuterVolumeSpecName: "kube-api-access-xxnwg") pod "37adecb9-a5fd-4e61-869b-4a04ac424ac0" (UID: "37adecb9-a5fd-4e61-869b-4a04ac424ac0"). InnerVolumeSpecName "kube-api-access-xxnwg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:33:19 crc kubenswrapper[4769]: I0131 16:33:19.492453 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "37adecb9-a5fd-4e61-869b-4a04ac424ac0" (UID: "37adecb9-a5fd-4e61-869b-4a04ac424ac0"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:33:19 crc kubenswrapper[4769]: I0131 16:33:19.492861 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "37adecb9-a5fd-4e61-869b-4a04ac424ac0" (UID: "37adecb9-a5fd-4e61-869b-4a04ac424ac0"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:33:19 crc kubenswrapper[4769]: E0131 16:33:19.493289 4769 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.22:6443: connect: connection refused" interval="6.4s" Jan 31 16:33:19 crc kubenswrapper[4769]: I0131 16:33:19.493363 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "37adecb9-a5fd-4e61-869b-4a04ac424ac0" (UID: "37adecb9-a5fd-4e61-869b-4a04ac424ac0"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:33:19 crc kubenswrapper[4769]: I0131 16:33:19.497820 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "37adecb9-a5fd-4e61-869b-4a04ac424ac0" (UID: "37adecb9-a5fd-4e61-869b-4a04ac424ac0"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:33:19 crc kubenswrapper[4769]: I0131 16:33:19.498136 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "37adecb9-a5fd-4e61-869b-4a04ac424ac0" (UID: "37adecb9-a5fd-4e61-869b-4a04ac424ac0"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:33:19 crc kubenswrapper[4769]: I0131 16:33:19.587175 4769 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Jan 31 16:33:19 crc kubenswrapper[4769]: I0131 16:33:19.587222 4769 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 31 16:33:19 crc kubenswrapper[4769]: I0131 16:33:19.587243 4769 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 31 16:33:19 crc kubenswrapper[4769]: I0131 16:33:19.587263 4769 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Jan 31 16:33:19 crc kubenswrapper[4769]: I0131 16:33:19.587284 4769 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Jan 31 16:33:19 crc kubenswrapper[4769]: I0131 16:33:19.587301 4769 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Jan 31 16:33:19 crc kubenswrapper[4769]: I0131 16:33:19.587319 4769 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Jan 31 16:33:19 crc kubenswrapper[4769]: I0131 16:33:19.587337 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xxnwg\" (UniqueName: \"kubernetes.io/projected/37adecb9-a5fd-4e61-869b-4a04ac424ac0-kube-api-access-xxnwg\") on node \"crc\" DevicePath \"\"" Jan 31 16:33:19 crc kubenswrapper[4769]: I0131 16:33:19.587354 4769 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Jan 31 16:33:19 crc kubenswrapper[4769]: I0131 16:33:19.587372 4769 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Jan 31 16:33:19 crc kubenswrapper[4769]: I0131 16:33:19.587391 4769 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/37adecb9-a5fd-4e61-869b-4a04ac424ac0-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 31 16:33:19 crc kubenswrapper[4769]: I0131 16:33:19.587408 4769 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: 
\"kubernetes.io/secret/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Jan 31 16:33:19 crc kubenswrapper[4769]: I0131 16:33:19.587425 4769 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/37adecb9-a5fd-4e61-869b-4a04ac424ac0-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Jan 31 16:33:20 crc kubenswrapper[4769]: I0131 16:33:20.103857 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" event={"ID":"37adecb9-a5fd-4e61-869b-4a04ac424ac0","Type":"ContainerDied","Data":"344e7e435ea686f5b6aa3c4ca6f18338dac606366f5a0b0a563cd0442f2361ab"} Jan 31 16:33:20 crc kubenswrapper[4769]: I0131 16:33:20.103947 4769 scope.go:117] "RemoveContainer" containerID="aa088c60e088a9f8c3a9899946bf609f1c5250a4c20484ebea483b036ac25038" Jan 31 16:33:20 crc kubenswrapper[4769]: I0131 16:33:20.103933 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" Jan 31 16:33:20 crc kubenswrapper[4769]: I0131 16:33:20.105570 4769 status_manager.go:851] "Failed to get status for pod" podUID="37adecb9-a5fd-4e61-869b-4a04ac424ac0" pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-h8nkx\": dial tcp 38.102.83.22:6443: connect: connection refused" Jan 31 16:33:20 crc kubenswrapper[4769]: I0131 16:33:20.106066 4769 status_manager.go:851] "Failed to get status for pod" podUID="dcb1a34a-c9b8-4dda-883f-f5b772ed1159" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.22:6443: connect: connection refused" Jan 31 16:33:20 crc kubenswrapper[4769]: I0131 16:33:20.133642 4769 status_manager.go:851] "Failed to get status for pod" podUID="37adecb9-a5fd-4e61-869b-4a04ac424ac0" pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-h8nkx\": dial tcp 38.102.83.22:6443: connect: connection refused" Jan 31 16:33:20 crc kubenswrapper[4769]: I0131 16:33:20.133999 4769 status_manager.go:851] "Failed to get status for pod" podUID="dcb1a34a-c9b8-4dda-883f-f5b772ed1159" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.22:6443: connect: connection refused" Jan 31 16:33:21 crc kubenswrapper[4769]: I0131 16:33:21.708107 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 31 16:33:21 crc kubenswrapper[4769]: I0131 16:33:21.709434 4769 status_manager.go:851] "Failed to get status for pod" podUID="37adecb9-a5fd-4e61-869b-4a04ac424ac0" pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-h8nkx\": dial tcp 38.102.83.22:6443: connect: connection refused" Jan 31 16:33:21 crc kubenswrapper[4769]: I0131 16:33:21.710211 4769 status_manager.go:851] "Failed to get status for pod" podUID="dcb1a34a-c9b8-4dda-883f-f5b772ed1159" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.22:6443: connect: connection refused" Jan 31 16:33:21 crc kubenswrapper[4769]: I0131 16:33:21.735481 4769 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="21fa2db2-f448-487d-9ddb-ba4da28e8ffa" Jan 31 16:33:21 crc kubenswrapper[4769]: I0131 16:33:21.735579 4769 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="21fa2db2-f448-487d-9ddb-ba4da28e8ffa" Jan 31 16:33:21 crc kubenswrapper[4769]: E0131 16:33:21.736149 4769 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.22:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 31 16:33:21 crc kubenswrapper[4769]: I0131 16:33:21.737130 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 31 16:33:21 crc kubenswrapper[4769]: W0131 16:33:21.770385 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71bb4a3aecc4ba5b26c4b7318770ce13.slice/crio-e5437e196d5957713b434131647f5d25b832922128e516a0ac93e9fa7b64f4c5 WatchSource:0}: Error finding container e5437e196d5957713b434131647f5d25b832922128e516a0ac93e9fa7b64f4c5: Status 404 returned error can't find the container with id e5437e196d5957713b434131647f5d25b832922128e516a0ac93e9fa7b64f4c5 Jan 31 16:33:22 crc kubenswrapper[4769]: I0131 16:33:22.133838 4769 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="e20dbaa53452119fd36d466d56695efc110642c12d36efbb809147d40550d4b9" exitCode=0 Jan 31 16:33:22 crc kubenswrapper[4769]: I0131 16:33:22.133924 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"e20dbaa53452119fd36d466d56695efc110642c12d36efbb809147d40550d4b9"} Jan 31 16:33:22 crc kubenswrapper[4769]: I0131 16:33:22.134206 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"e5437e196d5957713b434131647f5d25b832922128e516a0ac93e9fa7b64f4c5"} Jan 31 16:33:22 crc kubenswrapper[4769]: I0131 16:33:22.134649 4769 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="21fa2db2-f448-487d-9ddb-ba4da28e8ffa" Jan 31 16:33:22 crc kubenswrapper[4769]: I0131 16:33:22.134680 4769 mirror_client.go:130] "Deleting a mirror pod" 
pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="21fa2db2-f448-487d-9ddb-ba4da28e8ffa" Jan 31 16:33:22 crc kubenswrapper[4769]: E0131 16:33:22.135135 4769 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.22:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 31 16:33:22 crc kubenswrapper[4769]: I0131 16:33:22.135188 4769 status_manager.go:851] "Failed to get status for pod" podUID="37adecb9-a5fd-4e61-869b-4a04ac424ac0" pod="openshift-authentication/oauth-openshift-558db77b4-h8nkx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-h8nkx\": dial tcp 38.102.83.22:6443: connect: connection refused" Jan 31 16:33:22 crc kubenswrapper[4769]: I0131 16:33:22.135591 4769 status_manager.go:851] "Failed to get status for pod" podUID="dcb1a34a-c9b8-4dda-883f-f5b772ed1159" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.22:6443: connect: connection refused" Jan 31 16:33:23 crc kubenswrapper[4769]: I0131 16:33:23.141908 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"a738ffe81989017a524d341d0e883af45d84382cf98a08e318cb843e3abbf373"} Jan 31 16:33:23 crc kubenswrapper[4769]: I0131 16:33:23.142251 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"029ef98b2b97858fd06ad92eef46de21915015dc9e2cb89b28825c2ce158fe17"} Jan 31 16:33:23 crc kubenswrapper[4769]: I0131 16:33:23.142264 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"d7351ab325924e356d2794efcca053b0623c2bef2df20821566f004425ed2da5"} Jan 31 16:33:24 crc kubenswrapper[4769]: I0131 16:33:24.159903 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"470deac6b2890f2e73302b3e5eb9134ef7ead6bf1ef2157babd325531b1898a2"} Jan 31 16:33:24 crc kubenswrapper[4769]: I0131 16:33:24.160216 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"e731d5b84ead7cb6ea800023206da6724fb3ee89f36e191f80f5a1797441df22"} Jan 31 16:33:24 crc kubenswrapper[4769]: I0131 16:33:24.160824 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 31 16:33:24 crc kubenswrapper[4769]: I0131 16:33:24.160862 4769 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="21fa2db2-f448-487d-9ddb-ba4da28e8ffa" Jan 31 16:33:24 crc kubenswrapper[4769]: I0131 16:33:24.160899 4769 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="21fa2db2-f448-487d-9ddb-ba4da28e8ffa" Jan 31 16:33:26 crc kubenswrapper[4769]: I0131 16:33:26.189662 4769 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Jan 31 16:33:26 crc kubenswrapper[4769]: I0131 16:33:26.189746 4769 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="4dd9c6bf8a15857982b68a045b8ca1d407a63ad3da192375596f26389d9983cc" exitCode=1 Jan 31 16:33:26 crc kubenswrapper[4769]: I0131 16:33:26.189789 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"4dd9c6bf8a15857982b68a045b8ca1d407a63ad3da192375596f26389d9983cc"} Jan 31 16:33:26 crc kubenswrapper[4769]: I0131 16:33:26.190576 4769 scope.go:117] "RemoveContainer" containerID="4dd9c6bf8a15857982b68a045b8ca1d407a63ad3da192375596f26389d9983cc" Jan 31 16:33:26 crc kubenswrapper[4769]: I0131 16:33:26.737630 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 31 16:33:26 crc kubenswrapper[4769]: I0131 16:33:26.737695 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 31 16:33:26 crc kubenswrapper[4769]: I0131 16:33:26.746731 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 31 16:33:27 crc kubenswrapper[4769]: I0131 16:33:27.198854 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Jan 31 16:33:27 crc kubenswrapper[4769]: I0131 16:33:27.198912 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"12006731881b1919dfe5a5d9281b503eae6953c99b18e4424ef4119d6cffc524"} Jan 31 16:33:29 crc kubenswrapper[4769]: I0131 16:33:29.168032 4769 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 31 16:33:29 crc kubenswrapper[4769]: I0131 16:33:29.210875 4769 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="21fa2db2-f448-487d-9ddb-ba4da28e8ffa" Jan 31 16:33:29 crc kubenswrapper[4769]: I0131 16:33:29.210930 4769 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="21fa2db2-f448-487d-9ddb-ba4da28e8ffa" Jan 31 16:33:29 crc kubenswrapper[4769]: I0131 16:33:29.216921 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 31 16:33:29 crc kubenswrapper[4769]: I0131 16:33:29.221315 4769 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="665e7481-0721-4b78-88e2-1502e9f6c706" Jan 31 16:33:30 crc kubenswrapper[4769]: I0131 16:33:30.216525 4769 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="21fa2db2-f448-487d-9ddb-ba4da28e8ffa" Jan 31 16:33:30 crc kubenswrapper[4769]: I0131 16:33:30.216572 4769 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="21fa2db2-f448-487d-9ddb-ba4da28e8ffa" Jan 31 
16:33:32 crc kubenswrapper[4769]: I0131 16:33:32.726491 4769 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="665e7481-0721-4b78-88e2-1502e9f6c706" Jan 31 16:33:32 crc kubenswrapper[4769]: I0131 16:33:32.984901 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 31 16:33:33 crc kubenswrapper[4769]: I0131 16:33:33.702039 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 31 16:33:33 crc kubenswrapper[4769]: I0131 16:33:33.702206 4769 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Jan 31 16:33:33 crc kubenswrapper[4769]: I0131 16:33:33.702276 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Jan 31 16:33:35 crc kubenswrapper[4769]: I0131 16:33:35.158737 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Jan 31 16:33:35 crc kubenswrapper[4769]: I0131 16:33:35.245600 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Jan 31 16:33:35 crc kubenswrapper[4769]: I0131 16:33:35.393297 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Jan 31 16:33:35 crc kubenswrapper[4769]: I0131 16:33:35.396520 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Jan 31 16:33:35 crc kubenswrapper[4769]: I0131 16:33:35.577280 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Jan 31 16:33:35 crc kubenswrapper[4769]: I0131 16:33:35.616402 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Jan 31 16:33:35 crc kubenswrapper[4769]: I0131 16:33:35.767697 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Jan 31 16:33:35 crc kubenswrapper[4769]: I0131 16:33:35.910861 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Jan 31 16:33:35 crc kubenswrapper[4769]: I0131 16:33:35.932188 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Jan 31 16:33:36 crc kubenswrapper[4769]: I0131 16:33:36.026420 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Jan 31 16:33:36 crc kubenswrapper[4769]: I0131 16:33:36.059801 4769 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 31 16:33:36 crc kubenswrapper[4769]: I0131 16:33:36.082042 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Jan 31 16:33:36 crc kubenswrapper[4769]: I0131 16:33:36.119040 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Jan 31 16:33:36 crc kubenswrapper[4769]: I0131 16:33:36.121174 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Jan 31 16:33:36 crc kubenswrapper[4769]: I0131 16:33:36.178821 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Jan 31 16:33:36 crc kubenswrapper[4769]: I0131 16:33:36.195098 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Jan 31 16:33:36 crc kubenswrapper[4769]: I0131 16:33:36.276454 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Jan 31 16:33:36 crc kubenswrapper[4769]: I0131 16:33:36.287807 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Jan 31 16:33:36 crc kubenswrapper[4769]: I0131 16:33:36.328961 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Jan 31 16:33:36 crc kubenswrapper[4769]: I0131 16:33:36.499785 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Jan 31 16:33:36 crc kubenswrapper[4769]: I0131 16:33:36.526227 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Jan 31 16:33:36 crc kubenswrapper[4769]: I0131 16:33:36.678661 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Jan 31 16:33:36 crc kubenswrapper[4769]: I0131 16:33:36.701644 4769 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Jan 31 16:33:36 crc kubenswrapper[4769]: I0131 16:33:36.782249 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Jan 31 16:33:36 crc kubenswrapper[4769]: I0131 16:33:36.878917 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Jan 31 16:33:36 crc kubenswrapper[4769]: I0131 16:33:36.891286 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Jan 31 16:33:37 crc kubenswrapper[4769]: I0131 16:33:37.066657 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Jan 31 16:33:37 crc kubenswrapper[4769]: I0131 16:33:37.199833 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 31 16:33:37 crc kubenswrapper[4769]: I0131 16:33:37.237290 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 31 16:33:37 crc kubenswrapper[4769]: I0131 16:33:37.280448 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Jan 31 16:33:37 crc 
kubenswrapper[4769]: I0131 16:33:37.299606 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Jan 31 16:33:37 crc kubenswrapper[4769]: I0131 16:33:37.340515 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Jan 31 16:33:37 crc kubenswrapper[4769]: I0131 16:33:37.373957 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Jan 31 16:33:37 crc kubenswrapper[4769]: I0131 16:33:37.440465 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Jan 31 16:33:37 crc kubenswrapper[4769]: I0131 16:33:37.440657 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 31 16:33:37 crc kubenswrapper[4769]: I0131 16:33:37.513037 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Jan 31 16:33:37 crc kubenswrapper[4769]: I0131 16:33:37.850805 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Jan 31 16:33:37 crc kubenswrapper[4769]: I0131 16:33:37.887348 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Jan 31 16:33:37 crc kubenswrapper[4769]: I0131 16:33:37.928781 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Jan 31 16:33:37 crc kubenswrapper[4769]: I0131 16:33:37.933171 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Jan 31 16:33:37 crc kubenswrapper[4769]: I0131 16:33:37.950792 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Jan 31 16:33:37 crc kubenswrapper[4769]: I0131 16:33:37.983741 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Jan 31 16:33:38 crc kubenswrapper[4769]: I0131 16:33:38.037687 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Jan 31 16:33:38 crc kubenswrapper[4769]: I0131 16:33:38.396573 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Jan 31 16:33:38 crc kubenswrapper[4769]: I0131 16:33:38.409095 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Jan 31 16:33:38 crc kubenswrapper[4769]: I0131 16:33:38.527086 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 31 16:33:38 crc kubenswrapper[4769]: I0131 16:33:38.540559 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Jan 31 16:33:38 crc kubenswrapper[4769]: I0131 16:33:38.586297 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 31 16:33:38 crc kubenswrapper[4769]: I0131 16:33:38.696426 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Jan 31 16:33:38 crc kubenswrapper[4769]: I0131 16:33:38.849878 4769 reflector.go:368] 
Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Jan 31 16:33:38 crc kubenswrapper[4769]: I0131 16:33:38.915058 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Jan 31 16:33:38 crc kubenswrapper[4769]: I0131 16:33:38.937468 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Jan 31 16:33:39 crc kubenswrapper[4769]: I0131 16:33:39.064188 4769 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Jan 31 16:33:39 crc kubenswrapper[4769]: I0131 16:33:39.228527 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Jan 31 16:33:39 crc kubenswrapper[4769]: I0131 16:33:39.231183 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Jan 31 16:33:39 crc kubenswrapper[4769]: I0131 16:33:39.270005 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 31 16:33:39 crc kubenswrapper[4769]: I0131 16:33:39.302131 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Jan 31 16:33:39 crc kubenswrapper[4769]: I0131 16:33:39.378613 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Jan 31 16:33:39 crc kubenswrapper[4769]: I0131 16:33:39.474841 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Jan 31 16:33:39 crc kubenswrapper[4769]: I0131 16:33:39.660257 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Jan 31 16:33:39 crc kubenswrapper[4769]: I0131 16:33:39.930609 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Jan 31 16:33:39 crc kubenswrapper[4769]: I0131 16:33:39.939825 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Jan 31 16:33:39 crc kubenswrapper[4769]: I0131 16:33:39.983663 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Jan 31 16:33:40 crc kubenswrapper[4769]: I0131 16:33:40.181268 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Jan 31 16:33:40 crc kubenswrapper[4769]: I0131 16:33:40.227694 4769 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Jan 31 16:33:40 crc kubenswrapper[4769]: I0131 16:33:40.360279 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Jan 31 16:33:40 crc kubenswrapper[4769]: I0131 16:33:40.489350 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Jan 31 16:33:40 crc kubenswrapper[4769]: I0131 16:33:40.609588 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Jan 31 
16:33:40 crc kubenswrapper[4769]: I0131 16:33:40.691194 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Jan 31 16:33:40 crc kubenswrapper[4769]: I0131 16:33:40.851114 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Jan 31 16:33:40 crc kubenswrapper[4769]: I0131 16:33:40.914141 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Jan 31 16:33:40 crc kubenswrapper[4769]: I0131 16:33:40.928532 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Jan 31 16:33:40 crc kubenswrapper[4769]: I0131 16:33:40.951905 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Jan 31 16:33:41 crc kubenswrapper[4769]: I0131 16:33:41.166641 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Jan 31 16:33:41 crc kubenswrapper[4769]: I0131 16:33:41.420729 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Jan 31 16:33:41 crc kubenswrapper[4769]: I0131 16:33:41.530736 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Jan 31 16:33:41 crc kubenswrapper[4769]: I0131 16:33:41.533297 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Jan 31 16:33:41 crc kubenswrapper[4769]: I0131 16:33:41.761266 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Jan 31 16:33:42 crc kubenswrapper[4769]: I0131 16:33:42.225791 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Jan 31 16:33:42 crc kubenswrapper[4769]: I0131 16:33:42.585056 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Jan 31 16:33:43 crc kubenswrapper[4769]: I0131 16:33:43.478965 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Jan 31 16:33:43 crc kubenswrapper[4769]: I0131 16:33:43.702738 4769 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Jan 31 16:33:43 crc kubenswrapper[4769]: I0131 16:33:43.702849 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Jan 31 16:33:43 crc kubenswrapper[4769]: I0131 16:33:43.942272 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Jan 31 16:33:43 crc kubenswrapper[4769]: I0131 16:33:43.982127 4769 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Jan 31 16:33:44 crc kubenswrapper[4769]: I0131 16:33:44.236903 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Jan 31 16:33:44 crc kubenswrapper[4769]: I0131 16:33:44.602306 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Jan 31 16:33:44 crc kubenswrapper[4769]: I0131 16:33:44.637225 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Jan 31 16:33:44 crc kubenswrapper[4769]: I0131 16:33:44.937324 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Jan 31 16:33:44 crc kubenswrapper[4769]: I0131 16:33:44.970673 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Jan 31 16:33:45 crc kubenswrapper[4769]: I0131 16:33:45.061284 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Jan 31 16:33:45 crc kubenswrapper[4769]: I0131 16:33:45.090329 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Jan 31 16:33:45 crc kubenswrapper[4769]: I0131 16:33:45.164322 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Jan 31 16:33:45 crc kubenswrapper[4769]: I0131 16:33:45.243182 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Jan 31 16:33:45 crc kubenswrapper[4769]: I0131 16:33:45.440348 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Jan 31 16:33:45 crc kubenswrapper[4769]: I0131 16:33:45.476419 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Jan 31 16:33:45 crc kubenswrapper[4769]: I0131 16:33:45.490912 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Jan 31 16:33:45 crc kubenswrapper[4769]: I0131 16:33:45.629324 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Jan 31 16:33:45 crc kubenswrapper[4769]: I0131 16:33:45.826026 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Jan 31 16:33:45 crc kubenswrapper[4769]: I0131 16:33:45.981476 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Jan 31 16:33:46 crc kubenswrapper[4769]: I0131 16:33:46.125557 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Jan 31 16:33:46 crc kubenswrapper[4769]: I0131 16:33:46.205432 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Jan 31 16:33:46 crc kubenswrapper[4769]: I0131 16:33:46.228112 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Jan 31 16:33:46 crc kubenswrapper[4769]: I0131 16:33:46.230941 4769 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Jan 31 16:33:46 crc kubenswrapper[4769]: I0131 16:33:46.256874 4769 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Jan 31 16:33:46 crc kubenswrapper[4769]: I0131 16:33:46.262035 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-h8nkx","openshift-kube-apiserver/kube-apiserver-crc"] Jan 31 16:33:46 crc kubenswrapper[4769]: I0131 16:33:46.262106 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 31 16:33:46 crc kubenswrapper[4769]: I0131 16:33:46.262436 4769 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="21fa2db2-f448-487d-9ddb-ba4da28e8ffa" Jan 31 16:33:46 crc kubenswrapper[4769]: I0131 16:33:46.262472 4769 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="21fa2db2-f448-487d-9ddb-ba4da28e8ffa" Jan 31 16:33:46 crc kubenswrapper[4769]: I0131 16:33:46.273587 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 31 16:33:46 crc kubenswrapper[4769]: I0131 16:33:46.295701 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=17.295674914 podStartE2EDuration="17.295674914s" podCreationTimestamp="2026-01-31 16:33:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:33:46.294299637 +0000 UTC m=+274.368468316" watchObservedRunningTime="2026-01-31 16:33:46.295674914 +0000 UTC m=+274.369843583" Jan 31 16:33:46 crc kubenswrapper[4769]: I0131 16:33:46.424574 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Jan 31 16:33:46 crc kubenswrapper[4769]: I0131 16:33:46.463481 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Jan 31 16:33:46 crc kubenswrapper[4769]: I0131 16:33:46.721991 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="37adecb9-a5fd-4e61-869b-4a04ac424ac0" path="/var/lib/kubelet/pods/37adecb9-a5fd-4e61-869b-4a04ac424ac0/volumes" Jan 31 16:33:46 crc kubenswrapper[4769]: I0131 16:33:46.827898 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Jan 31 16:33:46 crc kubenswrapper[4769]: I0131 16:33:46.859506 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Jan 31 16:33:46 crc kubenswrapper[4769]: I0131 16:33:46.923110 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Jan 31 16:33:47 crc kubenswrapper[4769]: I0131 16:33:47.026044 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Jan 31 16:33:47 crc kubenswrapper[4769]: I0131 16:33:47.050972 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 31 16:33:47 crc kubenswrapper[4769]: I0131 16:33:47.077611 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Jan 31 
16:33:47 crc kubenswrapper[4769]: I0131 16:33:47.116861 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Jan 31 16:33:47 crc kubenswrapper[4769]: I0131 16:33:47.147060 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Jan 31 16:33:47 crc kubenswrapper[4769]: I0131 16:33:47.174188 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Jan 31 16:33:47 crc kubenswrapper[4769]: I0131 16:33:47.184998 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Jan 31 16:33:47 crc kubenswrapper[4769]: I0131 16:33:47.399278 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Jan 31 16:33:47 crc kubenswrapper[4769]: I0131 16:33:47.525478 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 31 16:33:47 crc kubenswrapper[4769]: I0131 16:33:47.672957 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Jan 31 16:33:47 crc kubenswrapper[4769]: I0131 16:33:47.778909 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Jan 31 16:33:47 crc kubenswrapper[4769]: I0131 16:33:47.830780 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Jan 31 16:33:47 crc kubenswrapper[4769]: I0131 16:33:47.883191 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Jan 31 16:33:47 crc kubenswrapper[4769]: I0131 16:33:47.927649 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Jan 31 16:33:47 crc kubenswrapper[4769]: I0131 16:33:47.939803 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Jan 31 16:33:47 crc kubenswrapper[4769]: I0131 16:33:47.951631 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.007352 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-96d6999f9-hcbz5"] Jan 31 16:33:48 crc kubenswrapper[4769]: E0131 16:33:48.007576 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dcb1a34a-c9b8-4dda-883f-f5b772ed1159" containerName="installer" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.007593 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="dcb1a34a-c9b8-4dda-883f-f5b772ed1159" containerName="installer" Jan 31 16:33:48 crc kubenswrapper[4769]: E0131 16:33:48.007608 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37adecb9-a5fd-4e61-869b-4a04ac424ac0" containerName="oauth-openshift" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.007614 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="37adecb9-a5fd-4e61-869b-4a04ac424ac0" containerName="oauth-openshift" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.007701 4769 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="dcb1a34a-c9b8-4dda-883f-f5b772ed1159" containerName="installer" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.007712 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="37adecb9-a5fd-4e61-869b-4a04ac424ac0" containerName="oauth-openshift" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.008097 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.010453 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.010719 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.010888 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.011391 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.011896 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.012478 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.012609 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.013312 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.013316 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.013462 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.013772 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.013796 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.019229 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.028686 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.030284 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.031592 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 
16:33:48.036456 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.037432 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-96d6999f9-hcbz5"] Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.163098 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.202242 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2fsvj\" (UniqueName: \"kubernetes.io/projected/de151a35-fe3f-4e99-94fe-3ed66f82b3ef-kube-api-access-2fsvj\") pod \"oauth-openshift-96d6999f9-hcbz5\" (UID: \"de151a35-fe3f-4e99-94fe-3ed66f82b3ef\") " pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.202291 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/de151a35-fe3f-4e99-94fe-3ed66f82b3ef-v4-0-config-system-session\") pod \"oauth-openshift-96d6999f9-hcbz5\" (UID: \"de151a35-fe3f-4e99-94fe-3ed66f82b3ef\") " pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.202326 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/de151a35-fe3f-4e99-94fe-3ed66f82b3ef-v4-0-config-system-router-certs\") pod \"oauth-openshift-96d6999f9-hcbz5\" (UID: \"de151a35-fe3f-4e99-94fe-3ed66f82b3ef\") " pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.202359 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/de151a35-fe3f-4e99-94fe-3ed66f82b3ef-v4-0-config-system-cliconfig\") pod \"oauth-openshift-96d6999f9-hcbz5\" (UID: \"de151a35-fe3f-4e99-94fe-3ed66f82b3ef\") " pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.202673 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/de151a35-fe3f-4e99-94fe-3ed66f82b3ef-v4-0-config-user-template-error\") pod \"oauth-openshift-96d6999f9-hcbz5\" (UID: \"de151a35-fe3f-4e99-94fe-3ed66f82b3ef\") " pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.202778 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/de151a35-fe3f-4e99-94fe-3ed66f82b3ef-audit-policies\") pod \"oauth-openshift-96d6999f9-hcbz5\" (UID: \"de151a35-fe3f-4e99-94fe-3ed66f82b3ef\") " pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.202820 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/de151a35-fe3f-4e99-94fe-3ed66f82b3ef-v4-0-config-system-serving-cert\") pod 
\"oauth-openshift-96d6999f9-hcbz5\" (UID: \"de151a35-fe3f-4e99-94fe-3ed66f82b3ef\") " pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.202868 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/de151a35-fe3f-4e99-94fe-3ed66f82b3ef-v4-0-config-user-template-login\") pod \"oauth-openshift-96d6999f9-hcbz5\" (UID: \"de151a35-fe3f-4e99-94fe-3ed66f82b3ef\") " pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.202913 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/de151a35-fe3f-4e99-94fe-3ed66f82b3ef-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-96d6999f9-hcbz5\" (UID: \"de151a35-fe3f-4e99-94fe-3ed66f82b3ef\") " pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.202945 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/de151a35-fe3f-4e99-94fe-3ed66f82b3ef-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-96d6999f9-hcbz5\" (UID: \"de151a35-fe3f-4e99-94fe-3ed66f82b3ef\") " pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.202964 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/de151a35-fe3f-4e99-94fe-3ed66f82b3ef-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-96d6999f9-hcbz5\" (UID: \"de151a35-fe3f-4e99-94fe-3ed66f82b3ef\") " pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.202995 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/de151a35-fe3f-4e99-94fe-3ed66f82b3ef-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-96d6999f9-hcbz5\" (UID: \"de151a35-fe3f-4e99-94fe-3ed66f82b3ef\") " pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.203022 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/de151a35-fe3f-4e99-94fe-3ed66f82b3ef-v4-0-config-system-service-ca\") pod \"oauth-openshift-96d6999f9-hcbz5\" (UID: \"de151a35-fe3f-4e99-94fe-3ed66f82b3ef\") " pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.203052 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/de151a35-fe3f-4e99-94fe-3ed66f82b3ef-audit-dir\") pod \"oauth-openshift-96d6999f9-hcbz5\" (UID: \"de151a35-fe3f-4e99-94fe-3ed66f82b3ef\") " pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.243088 4769 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-kube-storage-version-migrator-operator"/"config" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.304121 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/de151a35-fe3f-4e99-94fe-3ed66f82b3ef-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-96d6999f9-hcbz5\" (UID: \"de151a35-fe3f-4e99-94fe-3ed66f82b3ef\") " pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.304188 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/de151a35-fe3f-4e99-94fe-3ed66f82b3ef-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-96d6999f9-hcbz5\" (UID: \"de151a35-fe3f-4e99-94fe-3ed66f82b3ef\") " pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.304226 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/de151a35-fe3f-4e99-94fe-3ed66f82b3ef-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-96d6999f9-hcbz5\" (UID: \"de151a35-fe3f-4e99-94fe-3ed66f82b3ef\") " pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.304275 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/de151a35-fe3f-4e99-94fe-3ed66f82b3ef-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-96d6999f9-hcbz5\" (UID: \"de151a35-fe3f-4e99-94fe-3ed66f82b3ef\") " pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.304314 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/de151a35-fe3f-4e99-94fe-3ed66f82b3ef-v4-0-config-system-service-ca\") pod \"oauth-openshift-96d6999f9-hcbz5\" (UID: \"de151a35-fe3f-4e99-94fe-3ed66f82b3ef\") " pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.304351 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/de151a35-fe3f-4e99-94fe-3ed66f82b3ef-audit-dir\") pod \"oauth-openshift-96d6999f9-hcbz5\" (UID: \"de151a35-fe3f-4e99-94fe-3ed66f82b3ef\") " pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.304414 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2fsvj\" (UniqueName: \"kubernetes.io/projected/de151a35-fe3f-4e99-94fe-3ed66f82b3ef-kube-api-access-2fsvj\") pod \"oauth-openshift-96d6999f9-hcbz5\" (UID: \"de151a35-fe3f-4e99-94fe-3ed66f82b3ef\") " pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.304448 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/de151a35-fe3f-4e99-94fe-3ed66f82b3ef-v4-0-config-system-session\") pod \"oauth-openshift-96d6999f9-hcbz5\" (UID: \"de151a35-fe3f-4e99-94fe-3ed66f82b3ef\") " 
pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.304478 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/de151a35-fe3f-4e99-94fe-3ed66f82b3ef-v4-0-config-system-router-certs\") pod \"oauth-openshift-96d6999f9-hcbz5\" (UID: \"de151a35-fe3f-4e99-94fe-3ed66f82b3ef\") " pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.304545 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/de151a35-fe3f-4e99-94fe-3ed66f82b3ef-v4-0-config-user-template-error\") pod \"oauth-openshift-96d6999f9-hcbz5\" (UID: \"de151a35-fe3f-4e99-94fe-3ed66f82b3ef\") " pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.304580 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/de151a35-fe3f-4e99-94fe-3ed66f82b3ef-v4-0-config-system-cliconfig\") pod \"oauth-openshift-96d6999f9-hcbz5\" (UID: \"de151a35-fe3f-4e99-94fe-3ed66f82b3ef\") " pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.304623 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/de151a35-fe3f-4e99-94fe-3ed66f82b3ef-audit-policies\") pod \"oauth-openshift-96d6999f9-hcbz5\" (UID: \"de151a35-fe3f-4e99-94fe-3ed66f82b3ef\") " pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.304655 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/de151a35-fe3f-4e99-94fe-3ed66f82b3ef-v4-0-config-system-serving-cert\") pod \"oauth-openshift-96d6999f9-hcbz5\" (UID: \"de151a35-fe3f-4e99-94fe-3ed66f82b3ef\") " pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.304697 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/de151a35-fe3f-4e99-94fe-3ed66f82b3ef-v4-0-config-user-template-login\") pod \"oauth-openshift-96d6999f9-hcbz5\" (UID: \"de151a35-fe3f-4e99-94fe-3ed66f82b3ef\") " pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.305430 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/de151a35-fe3f-4e99-94fe-3ed66f82b3ef-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-96d6999f9-hcbz5\" (UID: \"de151a35-fe3f-4e99-94fe-3ed66f82b3ef\") " pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.306402 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/de151a35-fe3f-4e99-94fe-3ed66f82b3ef-v4-0-config-system-service-ca\") pod \"oauth-openshift-96d6999f9-hcbz5\" (UID: \"de151a35-fe3f-4e99-94fe-3ed66f82b3ef\") " pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" 
Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.306482 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/de151a35-fe3f-4e99-94fe-3ed66f82b3ef-audit-dir\") pod \"oauth-openshift-96d6999f9-hcbz5\" (UID: \"de151a35-fe3f-4e99-94fe-3ed66f82b3ef\") " pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.307437 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/de151a35-fe3f-4e99-94fe-3ed66f82b3ef-v4-0-config-system-cliconfig\") pod \"oauth-openshift-96d6999f9-hcbz5\" (UID: \"de151a35-fe3f-4e99-94fe-3ed66f82b3ef\") " pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.309369 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/de151a35-fe3f-4e99-94fe-3ed66f82b3ef-audit-policies\") pod \"oauth-openshift-96d6999f9-hcbz5\" (UID: \"de151a35-fe3f-4e99-94fe-3ed66f82b3ef\") " pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.311823 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/de151a35-fe3f-4e99-94fe-3ed66f82b3ef-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-96d6999f9-hcbz5\" (UID: \"de151a35-fe3f-4e99-94fe-3ed66f82b3ef\") " pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.312193 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/de151a35-fe3f-4e99-94fe-3ed66f82b3ef-v4-0-config-system-router-certs\") pod \"oauth-openshift-96d6999f9-hcbz5\" (UID: \"de151a35-fe3f-4e99-94fe-3ed66f82b3ef\") " pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.312766 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/de151a35-fe3f-4e99-94fe-3ed66f82b3ef-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-96d6999f9-hcbz5\" (UID: \"de151a35-fe3f-4e99-94fe-3ed66f82b3ef\") " pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.313605 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/de151a35-fe3f-4e99-94fe-3ed66f82b3ef-v4-0-config-user-template-error\") pod \"oauth-openshift-96d6999f9-hcbz5\" (UID: \"de151a35-fe3f-4e99-94fe-3ed66f82b3ef\") " pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.314185 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/de151a35-fe3f-4e99-94fe-3ed66f82b3ef-v4-0-config-user-template-login\") pod \"oauth-openshift-96d6999f9-hcbz5\" (UID: \"de151a35-fe3f-4e99-94fe-3ed66f82b3ef\") " pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.316599 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/de151a35-fe3f-4e99-94fe-3ed66f82b3ef-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-96d6999f9-hcbz5\" (UID: \"de151a35-fe3f-4e99-94fe-3ed66f82b3ef\") " pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.323848 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/de151a35-fe3f-4e99-94fe-3ed66f82b3ef-v4-0-config-system-session\") pod \"oauth-openshift-96d6999f9-hcbz5\" (UID: \"de151a35-fe3f-4e99-94fe-3ed66f82b3ef\") " pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.330481 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2fsvj\" (UniqueName: \"kubernetes.io/projected/de151a35-fe3f-4e99-94fe-3ed66f82b3ef-kube-api-access-2fsvj\") pod \"oauth-openshift-96d6999f9-hcbz5\" (UID: \"de151a35-fe3f-4e99-94fe-3ed66f82b3ef\") " pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.332620 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/de151a35-fe3f-4e99-94fe-3ed66f82b3ef-v4-0-config-system-serving-cert\") pod \"oauth-openshift-96d6999f9-hcbz5\" (UID: \"de151a35-fe3f-4e99-94fe-3ed66f82b3ef\") " pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.342666 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.515301 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.563341 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.761393 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.791800 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-96d6999f9-hcbz5"] Jan 31 16:33:48 crc kubenswrapper[4769]: W0131 16:33:48.804297 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podde151a35_fe3f_4e99_94fe_3ed66f82b3ef.slice/crio-2e2e0c9fbc9b4207e3fca0131a2459077834b6d7c23c88ee6cdd14a32ef57ad6 WatchSource:0}: Error finding container 2e2e0c9fbc9b4207e3fca0131a2459077834b6d7c23c88ee6cdd14a32ef57ad6: Status 404 returned error can't find the container with id 2e2e0c9fbc9b4207e3fca0131a2459077834b6d7c23c88ee6cdd14a32ef57ad6 Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.920600 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.966195 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.971938 4769 reflector.go:368] Caches 
populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Jan 31 16:33:48 crc kubenswrapper[4769]: I0131 16:33:48.987169 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Jan 31 16:33:49 crc kubenswrapper[4769]: I0131 16:33:49.242589 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Jan 31 16:33:49 crc kubenswrapper[4769]: I0131 16:33:49.288830 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Jan 31 16:33:49 crc kubenswrapper[4769]: I0131 16:33:49.347898 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" event={"ID":"de151a35-fe3f-4e99-94fe-3ed66f82b3ef","Type":"ContainerStarted","Data":"e84db32ace58a5eb6f86d959978b565b312cf9d6674665ba16ce3a8728a44a0f"} Jan 31 16:33:49 crc kubenswrapper[4769]: I0131 16:33:49.347953 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" event={"ID":"de151a35-fe3f-4e99-94fe-3ed66f82b3ef","Type":"ContainerStarted","Data":"2e2e0c9fbc9b4207e3fca0131a2459077834b6d7c23c88ee6cdd14a32ef57ad6"} Jan 31 16:33:49 crc kubenswrapper[4769]: I0131 16:33:49.348549 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" Jan 31 16:33:49 crc kubenswrapper[4769]: I0131 16:33:49.352864 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Jan 31 16:33:49 crc kubenswrapper[4769]: I0131 16:33:49.358316 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Jan 31 16:33:49 crc kubenswrapper[4769]: I0131 16:33:49.391317 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 31 16:33:49 crc kubenswrapper[4769]: I0131 16:33:49.436556 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Jan 31 16:33:49 crc kubenswrapper[4769]: I0131 16:33:49.503124 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Jan 31 16:33:49 crc kubenswrapper[4769]: I0131 16:33:49.550733 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" Jan 31 16:33:49 crc kubenswrapper[4769]: I0131 16:33:49.568906 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Jan 31 16:33:49 crc kubenswrapper[4769]: I0131 16:33:49.571856 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-96d6999f9-hcbz5" podStartSLOduration=56.571826337 podStartE2EDuration="56.571826337s" podCreationTimestamp="2026-01-31 16:32:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:33:49.384028651 +0000 UTC m=+277.458197350" watchObservedRunningTime="2026-01-31 16:33:49.571826337 +0000 UTC m=+277.645995016" Jan 31 16:33:49 crc kubenswrapper[4769]: I0131 16:33:49.641220 4769 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-operator-lifecycle-manager"/"pprof-cert" Jan 31 16:33:49 crc kubenswrapper[4769]: I0131 16:33:49.783291 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Jan 31 16:33:49 crc kubenswrapper[4769]: I0131 16:33:49.907927 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Jan 31 16:33:50 crc kubenswrapper[4769]: I0131 16:33:50.067154 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 31 16:33:50 crc kubenswrapper[4769]: I0131 16:33:50.137925 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Jan 31 16:33:50 crc kubenswrapper[4769]: I0131 16:33:50.140155 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Jan 31 16:33:50 crc kubenswrapper[4769]: I0131 16:33:50.249627 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Jan 31 16:33:50 crc kubenswrapper[4769]: I0131 16:33:50.276181 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Jan 31 16:33:50 crc kubenswrapper[4769]: I0131 16:33:50.295431 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Jan 31 16:33:50 crc kubenswrapper[4769]: I0131 16:33:50.360157 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Jan 31 16:33:50 crc kubenswrapper[4769]: I0131 16:33:50.397529 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 31 16:33:50 crc kubenswrapper[4769]: I0131 16:33:50.397598 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Jan 31 16:33:50 crc kubenswrapper[4769]: I0131 16:33:50.408171 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Jan 31 16:33:50 crc kubenswrapper[4769]: I0131 16:33:50.550412 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Jan 31 16:33:50 crc kubenswrapper[4769]: I0131 16:33:50.681348 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Jan 31 16:33:50 crc kubenswrapper[4769]: I0131 16:33:50.712721 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 31 16:33:50 crc kubenswrapper[4769]: I0131 16:33:50.800092 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Jan 31 16:33:50 crc kubenswrapper[4769]: I0131 16:33:50.818729 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Jan 31 16:33:50 crc kubenswrapper[4769]: I0131 16:33:50.921688 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Jan 31 16:33:50 crc kubenswrapper[4769]: I0131 16:33:50.932352 4769 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Jan 31 16:33:51 crc kubenswrapper[4769]: I0131 16:33:51.017326 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Jan 31 16:33:51 crc kubenswrapper[4769]: I0131 16:33:51.146658 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Jan 31 16:33:51 crc kubenswrapper[4769]: I0131 16:33:51.266214 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Jan 31 16:33:51 crc kubenswrapper[4769]: I0131 16:33:51.267579 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Jan 31 16:33:51 crc kubenswrapper[4769]: I0131 16:33:51.350030 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Jan 31 16:33:51 crc kubenswrapper[4769]: I0131 16:33:51.421430 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Jan 31 16:33:51 crc kubenswrapper[4769]: I0131 16:33:51.509909 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Jan 31 16:33:51 crc kubenswrapper[4769]: I0131 16:33:51.525411 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Jan 31 16:33:51 crc kubenswrapper[4769]: I0131 16:33:51.529359 4769 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 31 16:33:51 crc kubenswrapper[4769]: I0131 16:33:51.529719 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://966481c8e8a6a266ceacf7b005c1afc334c4ef8c90be4680282ce5b97cc3f00e" gracePeriod=5 Jan 31 16:33:51 crc kubenswrapper[4769]: I0131 16:33:51.566341 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Jan 31 16:33:51 crc kubenswrapper[4769]: I0131 16:33:51.649142 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Jan 31 16:33:51 crc kubenswrapper[4769]: I0131 16:33:51.684615 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Jan 31 16:33:51 crc kubenswrapper[4769]: I0131 16:33:51.803302 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Jan 31 16:33:51 crc kubenswrapper[4769]: I0131 16:33:51.818404 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Jan 31 16:33:51 crc kubenswrapper[4769]: I0131 16:33:51.860536 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Jan 31 16:33:51 crc kubenswrapper[4769]: I0131 16:33:51.995155 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Jan 31 16:33:52 crc kubenswrapper[4769]: I0131 16:33:52.004934 4769 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Jan 31 16:33:52 crc kubenswrapper[4769]: I0131 16:33:52.076005 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 31 16:33:52 crc kubenswrapper[4769]: I0131 16:33:52.093118 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Jan 31 16:33:52 crc kubenswrapper[4769]: I0131 16:33:52.106688 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Jan 31 16:33:52 crc kubenswrapper[4769]: I0131 16:33:52.149040 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Jan 31 16:33:52 crc kubenswrapper[4769]: I0131 16:33:52.150143 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Jan 31 16:33:52 crc kubenswrapper[4769]: I0131 16:33:52.250870 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Jan 31 16:33:52 crc kubenswrapper[4769]: I0131 16:33:52.250996 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Jan 31 16:33:52 crc kubenswrapper[4769]: I0131 16:33:52.251560 4769 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Jan 31 16:33:52 crc kubenswrapper[4769]: I0131 16:33:52.266685 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Jan 31 16:33:52 crc kubenswrapper[4769]: I0131 16:33:52.352783 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Jan 31 16:33:52 crc kubenswrapper[4769]: I0131 16:33:52.355611 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Jan 31 16:33:52 crc kubenswrapper[4769]: I0131 16:33:52.414120 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Jan 31 16:33:52 crc kubenswrapper[4769]: I0131 16:33:52.461117 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Jan 31 16:33:52 crc kubenswrapper[4769]: I0131 16:33:52.479658 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Jan 31 16:33:52 crc kubenswrapper[4769]: I0131 16:33:52.594817 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Jan 31 16:33:52 crc kubenswrapper[4769]: I0131 16:33:52.622630 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Jan 31 16:33:52 crc kubenswrapper[4769]: I0131 16:33:52.819947 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Jan 31 16:33:52 crc kubenswrapper[4769]: I0131 16:33:52.902545 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Jan 31 16:33:53 crc kubenswrapper[4769]: I0131 16:33:53.009726 4769 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Jan 31 16:33:53 crc kubenswrapper[4769]: I0131 16:33:53.140448 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 31 16:33:53 crc kubenswrapper[4769]: I0131 16:33:53.175184 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Jan 31 16:33:53 crc kubenswrapper[4769]: I0131 16:33:53.188545 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Jan 31 16:33:53 crc kubenswrapper[4769]: I0131 16:33:53.392544 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Jan 31 16:33:53 crc kubenswrapper[4769]: I0131 16:33:53.703289 4769 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Jan 31 16:33:53 crc kubenswrapper[4769]: I0131 16:33:53.703460 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Jan 31 16:33:53 crc kubenswrapper[4769]: I0131 16:33:53.703625 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 31 16:33:53 crc kubenswrapper[4769]: I0131 16:33:53.706023 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="kube-controller-manager" containerStatusID={"Type":"cri-o","ID":"12006731881b1919dfe5a5d9281b503eae6953c99b18e4424ef4119d6cffc524"} pod="openshift-kube-controller-manager/kube-controller-manager-crc" containerMessage="Container kube-controller-manager failed startup probe, will be restarted" Jan 31 16:33:53 crc kubenswrapper[4769]: I0131 16:33:53.706444 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" containerID="cri-o://12006731881b1919dfe5a5d9281b503eae6953c99b18e4424ef4119d6cffc524" gracePeriod=30 Jan 31 16:33:53 crc kubenswrapper[4769]: I0131 16:33:53.803089 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Jan 31 16:33:53 crc kubenswrapper[4769]: I0131 16:33:53.886780 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Jan 31 16:33:53 crc kubenswrapper[4769]: I0131 16:33:53.921329 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Jan 31 16:33:54 crc kubenswrapper[4769]: I0131 16:33:54.040578 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Jan 31 16:33:54 crc kubenswrapper[4769]: I0131 16:33:54.088432 4769 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-network-console"/"networking-console-plugin-cert" Jan 31 16:33:54 crc kubenswrapper[4769]: I0131 16:33:54.165953 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Jan 31 16:33:54 crc kubenswrapper[4769]: I0131 16:33:54.169474 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Jan 31 16:33:54 crc kubenswrapper[4769]: I0131 16:33:54.187792 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Jan 31 16:33:54 crc kubenswrapper[4769]: I0131 16:33:54.254306 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Jan 31 16:33:54 crc kubenswrapper[4769]: I0131 16:33:54.296590 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Jan 31 16:33:54 crc kubenswrapper[4769]: I0131 16:33:54.331190 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Jan 31 16:33:54 crc kubenswrapper[4769]: I0131 16:33:54.358873 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Jan 31 16:33:54 crc kubenswrapper[4769]: I0131 16:33:54.400917 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Jan 31 16:33:54 crc kubenswrapper[4769]: I0131 16:33:54.557681 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Jan 31 16:33:54 crc kubenswrapper[4769]: I0131 16:33:54.763608 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Jan 31 16:33:55 crc kubenswrapper[4769]: I0131 16:33:55.171707 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Jan 31 16:33:55 crc kubenswrapper[4769]: I0131 16:33:55.229006 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Jan 31 16:33:55 crc kubenswrapper[4769]: I0131 16:33:55.259822 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Jan 31 16:33:55 crc kubenswrapper[4769]: I0131 16:33:55.383907 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Jan 31 16:33:55 crc kubenswrapper[4769]: I0131 16:33:55.589075 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Jan 31 16:33:55 crc kubenswrapper[4769]: I0131 16:33:55.662427 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Jan 31 16:33:55 crc kubenswrapper[4769]: I0131 16:33:55.977633 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Jan 31 16:33:56 crc kubenswrapper[4769]: I0131 16:33:56.267334 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 31 16:33:56 crc kubenswrapper[4769]: I0131 16:33:56.342942 4769 reflector.go:368] Caches populated for 
*v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Jan 31 16:33:56 crc kubenswrapper[4769]: I0131 16:33:56.482446 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Jan 31 16:33:56 crc kubenswrapper[4769]: I0131 16:33:56.637664 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Jan 31 16:33:57 crc kubenswrapper[4769]: I0131 16:33:57.136222 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Jan 31 16:33:57 crc kubenswrapper[4769]: I0131 16:33:57.136344 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 31 16:33:57 crc kubenswrapper[4769]: I0131 16:33:57.309872 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 31 16:33:57 crc kubenswrapper[4769]: I0131 16:33:57.309962 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 31 16:33:57 crc kubenswrapper[4769]: I0131 16:33:57.309996 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 31 16:33:57 crc kubenswrapper[4769]: I0131 16:33:57.310022 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 31 16:33:57 crc kubenswrapper[4769]: I0131 16:33:57.310344 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 31 16:33:57 crc kubenswrapper[4769]: I0131 16:33:57.310745 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 31 16:33:57 crc kubenswrapper[4769]: I0131 16:33:57.310871 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 31 16:33:57 crc kubenswrapper[4769]: I0131 16:33:57.310902 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 31 16:33:57 crc kubenswrapper[4769]: I0131 16:33:57.310929 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 31 16:33:57 crc kubenswrapper[4769]: I0131 16:33:57.322026 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 31 16:33:57 crc kubenswrapper[4769]: I0131 16:33:57.408732 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Jan 31 16:33:57 crc kubenswrapper[4769]: I0131 16:33:57.408797 4769 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="966481c8e8a6a266ceacf7b005c1afc334c4ef8c90be4680282ce5b97cc3f00e" exitCode=137 Jan 31 16:33:57 crc kubenswrapper[4769]: I0131 16:33:57.408845 4769 scope.go:117] "RemoveContainer" containerID="966481c8e8a6a266ceacf7b005c1afc334c4ef8c90be4680282ce5b97cc3f00e" Jan 31 16:33:57 crc kubenswrapper[4769]: I0131 16:33:57.409003 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 31 16:33:57 crc kubenswrapper[4769]: I0131 16:33:57.412539 4769 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 31 16:33:57 crc kubenswrapper[4769]: I0131 16:33:57.412729 4769 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Jan 31 16:33:57 crc kubenswrapper[4769]: I0131 16:33:57.412900 4769 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Jan 31 16:33:57 crc kubenswrapper[4769]: I0131 16:33:57.413106 4769 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 31 16:33:57 crc kubenswrapper[4769]: I0131 16:33:57.413313 4769 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Jan 31 16:33:57 crc kubenswrapper[4769]: I0131 16:33:57.433457 4769 scope.go:117] "RemoveContainer" containerID="966481c8e8a6a266ceacf7b005c1afc334c4ef8c90be4680282ce5b97cc3f00e" Jan 31 16:33:57 crc kubenswrapper[4769]: E0131 16:33:57.434518 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"966481c8e8a6a266ceacf7b005c1afc334c4ef8c90be4680282ce5b97cc3f00e\": container with ID starting with 966481c8e8a6a266ceacf7b005c1afc334c4ef8c90be4680282ce5b97cc3f00e not found: ID does not exist" containerID="966481c8e8a6a266ceacf7b005c1afc334c4ef8c90be4680282ce5b97cc3f00e" Jan 31 16:33:57 crc kubenswrapper[4769]: I0131 16:33:57.434587 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"966481c8e8a6a266ceacf7b005c1afc334c4ef8c90be4680282ce5b97cc3f00e"} err="failed to get container status \"966481c8e8a6a266ceacf7b005c1afc334c4ef8c90be4680282ce5b97cc3f00e\": rpc error: code = NotFound desc = could not find container \"966481c8e8a6a266ceacf7b005c1afc334c4ef8c90be4680282ce5b97cc3f00e\": container with ID starting with 966481c8e8a6a266ceacf7b005c1afc334c4ef8c90be4680282ce5b97cc3f00e not found: ID does not exist" Jan 31 16:33:57 crc kubenswrapper[4769]: I0131 16:33:57.502737 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Jan 31 16:33:57 crc kubenswrapper[4769]: I0131 16:33:57.710778 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Jan 31 16:33:57 crc kubenswrapper[4769]: I0131 16:33:57.733162 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Jan 31 16:33:58 crc kubenswrapper[4769]: I0131 16:33:58.105650 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Jan 31 16:33:58 crc kubenswrapper[4769]: I0131 16:33:58.321910 4769 reflector.go:368] Caches populated for *v1.RuntimeClass from 
k8s.io/client-go/informers/factory.go:160 Jan 31 16:33:58 crc kubenswrapper[4769]: I0131 16:33:58.720488 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Jan 31 16:33:58 crc kubenswrapper[4769]: I0131 16:33:58.832276 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Jan 31 16:33:58 crc kubenswrapper[4769]: I0131 16:33:58.944987 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Jan 31 16:33:59 crc kubenswrapper[4769]: I0131 16:33:59.449596 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Jan 31 16:33:59 crc kubenswrapper[4769]: I0131 16:33:59.827940 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Jan 31 16:34:12 crc kubenswrapper[4769]: I0131 16:34:12.463819 4769 cert_rotation.go:91] certificate rotation detected, shutting down client connections to start using new credentials Jan 31 16:34:24 crc kubenswrapper[4769]: I0131 16:34:24.567572 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/1.log" Jan 31 16:34:24 crc kubenswrapper[4769]: I0131 16:34:24.570794 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Jan 31 16:34:24 crc kubenswrapper[4769]: I0131 16:34:24.570854 4769 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="12006731881b1919dfe5a5d9281b503eae6953c99b18e4424ef4119d6cffc524" exitCode=137 Jan 31 16:34:24 crc kubenswrapper[4769]: I0131 16:34:24.570905 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"12006731881b1919dfe5a5d9281b503eae6953c99b18e4424ef4119d6cffc524"} Jan 31 16:34:24 crc kubenswrapper[4769]: I0131 16:34:24.570954 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"735cb69ed2811064cf336bef6bf426eed3004da2780e1c8312d2e7c48c082d9c"} Jan 31 16:34:24 crc kubenswrapper[4769]: I0131 16:34:24.570976 4769 scope.go:117] "RemoveContainer" containerID="4dd9c6bf8a15857982b68a045b8ca1d407a63ad3da192375596f26389d9983cc" Jan 31 16:34:25 crc kubenswrapper[4769]: I0131 16:34:25.579768 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/1.log" Jan 31 16:34:32 crc kubenswrapper[4769]: I0131 16:34:32.984257 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 31 16:34:33 crc kubenswrapper[4769]: I0131 16:34:33.702267 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 31 16:34:33 crc kubenswrapper[4769]: I0131 16:34:33.744749 4769 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 31 16:34:34 crc kubenswrapper[4769]: I0131 16:34:34.638846 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 31 16:34:45 crc kubenswrapper[4769]: I0131 16:34:45.298664 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-79bp9"] Jan 31 16:34:45 crc kubenswrapper[4769]: I0131 16:34:45.299394 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-79bp9" podUID="1efa9307-bdd7-4ec9-ab59-32196c343838" containerName="controller-manager" containerID="cri-o://d72cc3684fc894a73491f654528b7571e9c9dc2f848db5abd8345958c8664014" gracePeriod=30 Jan 31 16:34:45 crc kubenswrapper[4769]: I0131 16:34:45.301563 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-8td6v"] Jan 31 16:34:45 crc kubenswrapper[4769]: I0131 16:34:45.301748 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8td6v" podUID="f1cfa945-9049-4fe1-bde7-549e1c5d0e39" containerName="route-controller-manager" containerID="cri-o://16ad5663578e3ee24ce678025d4dcb1d0018d2d5f0b1104969804f920c4183e5" gracePeriod=30 Jan 31 16:34:45 crc kubenswrapper[4769]: I0131 16:34:45.714831 4769 generic.go:334] "Generic (PLEG): container finished" podID="f1cfa945-9049-4fe1-bde7-549e1c5d0e39" containerID="16ad5663578e3ee24ce678025d4dcb1d0018d2d5f0b1104969804f920c4183e5" exitCode=0 Jan 31 16:34:45 crc kubenswrapper[4769]: I0131 16:34:45.715134 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8td6v" event={"ID":"f1cfa945-9049-4fe1-bde7-549e1c5d0e39","Type":"ContainerDied","Data":"16ad5663578e3ee24ce678025d4dcb1d0018d2d5f0b1104969804f920c4183e5"} Jan 31 16:34:45 crc kubenswrapper[4769]: I0131 16:34:45.717450 4769 generic.go:334] "Generic (PLEG): container finished" podID="1efa9307-bdd7-4ec9-ab59-32196c343838" containerID="d72cc3684fc894a73491f654528b7571e9c9dc2f848db5abd8345958c8664014" exitCode=0 Jan 31 16:34:45 crc kubenswrapper[4769]: I0131 16:34:45.717482 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-79bp9" event={"ID":"1efa9307-bdd7-4ec9-ab59-32196c343838","Type":"ContainerDied","Data":"d72cc3684fc894a73491f654528b7571e9c9dc2f848db5abd8345958c8664014"} Jan 31 16:34:45 crc kubenswrapper[4769]: I0131 16:34:45.791956 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8td6v" Jan 31 16:34:45 crc kubenswrapper[4769]: I0131 16:34:45.798043 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-79bp9" Jan 31 16:34:45 crc kubenswrapper[4769]: I0131 16:34:45.947628 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1efa9307-bdd7-4ec9-ab59-32196c343838-client-ca\") pod \"1efa9307-bdd7-4ec9-ab59-32196c343838\" (UID: \"1efa9307-bdd7-4ec9-ab59-32196c343838\") " Jan 31 16:34:45 crc kubenswrapper[4769]: I0131 16:34:45.947739 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f1cfa945-9049-4fe1-bde7-549e1c5d0e39-client-ca\") pod \"f1cfa945-9049-4fe1-bde7-549e1c5d0e39\" (UID: \"f1cfa945-9049-4fe1-bde7-549e1c5d0e39\") " Jan 31 16:34:45 crc kubenswrapper[4769]: I0131 16:34:45.947775 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mfvw2\" (UniqueName: \"kubernetes.io/projected/f1cfa945-9049-4fe1-bde7-549e1c5d0e39-kube-api-access-mfvw2\") pod \"f1cfa945-9049-4fe1-bde7-549e1c5d0e39\" (UID: \"f1cfa945-9049-4fe1-bde7-549e1c5d0e39\") " Jan 31 16:34:45 crc kubenswrapper[4769]: I0131 16:34:45.947815 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/1efa9307-bdd7-4ec9-ab59-32196c343838-proxy-ca-bundles\") pod \"1efa9307-bdd7-4ec9-ab59-32196c343838\" (UID: \"1efa9307-bdd7-4ec9-ab59-32196c343838\") " Jan 31 16:34:45 crc kubenswrapper[4769]: I0131 16:34:45.947859 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f1cfa945-9049-4fe1-bde7-549e1c5d0e39-config\") pod \"f1cfa945-9049-4fe1-bde7-549e1c5d0e39\" (UID: \"f1cfa945-9049-4fe1-bde7-549e1c5d0e39\") " Jan 31 16:34:45 crc kubenswrapper[4769]: I0131 16:34:45.947878 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1efa9307-bdd7-4ec9-ab59-32196c343838-config\") pod \"1efa9307-bdd7-4ec9-ab59-32196c343838\" (UID: \"1efa9307-bdd7-4ec9-ab59-32196c343838\") " Jan 31 16:34:45 crc kubenswrapper[4769]: I0131 16:34:45.947922 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f1cfa945-9049-4fe1-bde7-549e1c5d0e39-serving-cert\") pod \"f1cfa945-9049-4fe1-bde7-549e1c5d0e39\" (UID: \"f1cfa945-9049-4fe1-bde7-549e1c5d0e39\") " Jan 31 16:34:45 crc kubenswrapper[4769]: I0131 16:34:45.948007 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1efa9307-bdd7-4ec9-ab59-32196c343838-serving-cert\") pod \"1efa9307-bdd7-4ec9-ab59-32196c343838\" (UID: \"1efa9307-bdd7-4ec9-ab59-32196c343838\") " Jan 31 16:34:45 crc kubenswrapper[4769]: I0131 16:34:45.948046 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2bx4f\" (UniqueName: \"kubernetes.io/projected/1efa9307-bdd7-4ec9-ab59-32196c343838-kube-api-access-2bx4f\") pod \"1efa9307-bdd7-4ec9-ab59-32196c343838\" (UID: \"1efa9307-bdd7-4ec9-ab59-32196c343838\") " Jan 31 16:34:45 crc kubenswrapper[4769]: I0131 16:34:45.948908 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f1cfa945-9049-4fe1-bde7-549e1c5d0e39-client-ca" (OuterVolumeSpecName: "client-ca") pod "f1cfa945-9049-4fe1-bde7-549e1c5d0e39" (UID: 
"f1cfa945-9049-4fe1-bde7-549e1c5d0e39"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:34:45 crc kubenswrapper[4769]: I0131 16:34:45.949113 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1efa9307-bdd7-4ec9-ab59-32196c343838-client-ca" (OuterVolumeSpecName: "client-ca") pod "1efa9307-bdd7-4ec9-ab59-32196c343838" (UID: "1efa9307-bdd7-4ec9-ab59-32196c343838"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:34:45 crc kubenswrapper[4769]: I0131 16:34:45.949674 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1efa9307-bdd7-4ec9-ab59-32196c343838-config" (OuterVolumeSpecName: "config") pod "1efa9307-bdd7-4ec9-ab59-32196c343838" (UID: "1efa9307-bdd7-4ec9-ab59-32196c343838"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:34:45 crc kubenswrapper[4769]: I0131 16:34:45.950081 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f1cfa945-9049-4fe1-bde7-549e1c5d0e39-config" (OuterVolumeSpecName: "config") pod "f1cfa945-9049-4fe1-bde7-549e1c5d0e39" (UID: "f1cfa945-9049-4fe1-bde7-549e1c5d0e39"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:34:45 crc kubenswrapper[4769]: I0131 16:34:45.950199 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1efa9307-bdd7-4ec9-ab59-32196c343838-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "1efa9307-bdd7-4ec9-ab59-32196c343838" (UID: "1efa9307-bdd7-4ec9-ab59-32196c343838"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:34:45 crc kubenswrapper[4769]: I0131 16:34:45.956096 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1efa9307-bdd7-4ec9-ab59-32196c343838-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1efa9307-bdd7-4ec9-ab59-32196c343838" (UID: "1efa9307-bdd7-4ec9-ab59-32196c343838"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:34:45 crc kubenswrapper[4769]: I0131 16:34:45.956634 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f1cfa945-9049-4fe1-bde7-549e1c5d0e39-kube-api-access-mfvw2" (OuterVolumeSpecName: "kube-api-access-mfvw2") pod "f1cfa945-9049-4fe1-bde7-549e1c5d0e39" (UID: "f1cfa945-9049-4fe1-bde7-549e1c5d0e39"). InnerVolumeSpecName "kube-api-access-mfvw2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:34:45 crc kubenswrapper[4769]: I0131 16:34:45.956960 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1efa9307-bdd7-4ec9-ab59-32196c343838-kube-api-access-2bx4f" (OuterVolumeSpecName: "kube-api-access-2bx4f") pod "1efa9307-bdd7-4ec9-ab59-32196c343838" (UID: "1efa9307-bdd7-4ec9-ab59-32196c343838"). InnerVolumeSpecName "kube-api-access-2bx4f". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:34:45 crc kubenswrapper[4769]: I0131 16:34:45.956997 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f1cfa945-9049-4fe1-bde7-549e1c5d0e39-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "f1cfa945-9049-4fe1-bde7-549e1c5d0e39" (UID: "f1cfa945-9049-4fe1-bde7-549e1c5d0e39"). 
InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.049971 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1efa9307-bdd7-4ec9-ab59-32196c343838-config\") on node \"crc\" DevicePath \"\"" Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.049999 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f1cfa945-9049-4fe1-bde7-549e1c5d0e39-config\") on node \"crc\" DevicePath \"\"" Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.050009 4769 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f1cfa945-9049-4fe1-bde7-549e1c5d0e39-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.050021 4769 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1efa9307-bdd7-4ec9-ab59-32196c343838-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.050031 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2bx4f\" (UniqueName: \"kubernetes.io/projected/1efa9307-bdd7-4ec9-ab59-32196c343838-kube-api-access-2bx4f\") on node \"crc\" DevicePath \"\"" Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.050041 4769 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1efa9307-bdd7-4ec9-ab59-32196c343838-client-ca\") on node \"crc\" DevicePath \"\"" Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.050050 4769 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f1cfa945-9049-4fe1-bde7-549e1c5d0e39-client-ca\") on node \"crc\" DevicePath \"\"" Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.050058 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mfvw2\" (UniqueName: \"kubernetes.io/projected/f1cfa945-9049-4fe1-bde7-549e1c5d0e39-kube-api-access-mfvw2\") on node \"crc\" DevicePath \"\"" Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.050067 4769 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/1efa9307-bdd7-4ec9-ab59-32196c343838-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.674566 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-77d8544d9d-4s5nw"] Jan 31 16:34:46 crc kubenswrapper[4769]: E0131 16:34:46.674914 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.674934 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 31 16:34:46 crc kubenswrapper[4769]: E0131 16:34:46.674964 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1efa9307-bdd7-4ec9-ab59-32196c343838" containerName="controller-manager" Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.674976 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="1efa9307-bdd7-4ec9-ab59-32196c343838" containerName="controller-manager" Jan 31 16:34:46 crc kubenswrapper[4769]: E0131 16:34:46.674992 4769 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="f1cfa945-9049-4fe1-bde7-549e1c5d0e39" containerName="route-controller-manager" Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.675004 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1cfa945-9049-4fe1-bde7-549e1c5d0e39" containerName="route-controller-manager" Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.675212 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="f1cfa945-9049-4fe1-bde7-549e1c5d0e39" containerName="route-controller-manager" Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.675239 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="1efa9307-bdd7-4ec9-ab59-32196c343838" containerName="controller-manager" Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.675254 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.675920 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-77d8544d9d-4s5nw" Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.691937 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-77d8544d9d-4s5nw"] Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.723543 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8td6v" event={"ID":"f1cfa945-9049-4fe1-bde7-549e1c5d0e39","Type":"ContainerDied","Data":"788cc8bb7866e5f080995e45c21b9f1a7de64ba610775519633b6ca34de28799"} Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.723612 4769 scope.go:117] "RemoveContainer" containerID="16ad5663578e3ee24ce678025d4dcb1d0018d2d5f0b1104969804f920c4183e5" Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.723566 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-8td6v" Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.726104 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-79bp9" event={"ID":"1efa9307-bdd7-4ec9-ab59-32196c343838","Type":"ContainerDied","Data":"4eb532f556a29102ff49af3d0a160233bcb17c835601fe63f3fb20e807cfd9f1"} Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.726215 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-79bp9" Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.747310 4769 scope.go:117] "RemoveContainer" containerID="d72cc3684fc894a73491f654528b7571e9c9dc2f848db5abd8345958c8664014" Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.765455 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-8td6v"] Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.770563 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-8td6v"] Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.778804 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-79bp9"] Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.781968 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-79bp9"] Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.791113 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-74dc647949-ccv4m"] Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.792713 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-74dc647949-ccv4m" Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.796254 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-74dc647949-ccv4m"] Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.798128 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.799525 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.799713 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.799756 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.799840 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.804917 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.805120 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.858932 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a4b8428c-ab24-46b9-b79f-53b39483aafa-serving-cert\") pod \"route-controller-manager-77d8544d9d-4s5nw\" (UID: \"a4b8428c-ab24-46b9-b79f-53b39483aafa\") " pod="openshift-route-controller-manager/route-controller-manager-77d8544d9d-4s5nw" Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.858995 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-kg7q9\" (UniqueName: \"kubernetes.io/projected/a4b8428c-ab24-46b9-b79f-53b39483aafa-kube-api-access-kg7q9\") pod \"route-controller-manager-77d8544d9d-4s5nw\" (UID: \"a4b8428c-ab24-46b9-b79f-53b39483aafa\") " pod="openshift-route-controller-manager/route-controller-manager-77d8544d9d-4s5nw" Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.859030 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4b8428c-ab24-46b9-b79f-53b39483aafa-config\") pod \"route-controller-manager-77d8544d9d-4s5nw\" (UID: \"a4b8428c-ab24-46b9-b79f-53b39483aafa\") " pod="openshift-route-controller-manager/route-controller-manager-77d8544d9d-4s5nw" Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.859574 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a4b8428c-ab24-46b9-b79f-53b39483aafa-client-ca\") pod \"route-controller-manager-77d8544d9d-4s5nw\" (UID: \"a4b8428c-ab24-46b9-b79f-53b39483aafa\") " pod="openshift-route-controller-manager/route-controller-manager-77d8544d9d-4s5nw" Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.961163 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a4b8428c-ab24-46b9-b79f-53b39483aafa-serving-cert\") pod \"route-controller-manager-77d8544d9d-4s5nw\" (UID: \"a4b8428c-ab24-46b9-b79f-53b39483aafa\") " pod="openshift-route-controller-manager/route-controller-manager-77d8544d9d-4s5nw" Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.961213 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s2rh2\" (UniqueName: \"kubernetes.io/projected/de542b67-94ed-458b-8665-8d4f03f7a0e8-kube-api-access-s2rh2\") pod \"controller-manager-74dc647949-ccv4m\" (UID: \"de542b67-94ed-458b-8665-8d4f03f7a0e8\") " pod="openshift-controller-manager/controller-manager-74dc647949-ccv4m" Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.961295 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kg7q9\" (UniqueName: \"kubernetes.io/projected/a4b8428c-ab24-46b9-b79f-53b39483aafa-kube-api-access-kg7q9\") pod \"route-controller-manager-77d8544d9d-4s5nw\" (UID: \"a4b8428c-ab24-46b9-b79f-53b39483aafa\") " pod="openshift-route-controller-manager/route-controller-manager-77d8544d9d-4s5nw" Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.961324 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4b8428c-ab24-46b9-b79f-53b39483aafa-config\") pod \"route-controller-manager-77d8544d9d-4s5nw\" (UID: \"a4b8428c-ab24-46b9-b79f-53b39483aafa\") " pod="openshift-route-controller-manager/route-controller-manager-77d8544d9d-4s5nw" Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.961690 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/de542b67-94ed-458b-8665-8d4f03f7a0e8-client-ca\") pod \"controller-manager-74dc647949-ccv4m\" (UID: \"de542b67-94ed-458b-8665-8d4f03f7a0e8\") " pod="openshift-controller-manager/controller-manager-74dc647949-ccv4m" Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.961718 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"config\" (UniqueName: \"kubernetes.io/configmap/de542b67-94ed-458b-8665-8d4f03f7a0e8-config\") pod \"controller-manager-74dc647949-ccv4m\" (UID: \"de542b67-94ed-458b-8665-8d4f03f7a0e8\") " pod="openshift-controller-manager/controller-manager-74dc647949-ccv4m" Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.961774 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/de542b67-94ed-458b-8665-8d4f03f7a0e8-serving-cert\") pod \"controller-manager-74dc647949-ccv4m\" (UID: \"de542b67-94ed-458b-8665-8d4f03f7a0e8\") " pod="openshift-controller-manager/controller-manager-74dc647949-ccv4m" Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.961828 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a4b8428c-ab24-46b9-b79f-53b39483aafa-client-ca\") pod \"route-controller-manager-77d8544d9d-4s5nw\" (UID: \"a4b8428c-ab24-46b9-b79f-53b39483aafa\") " pod="openshift-route-controller-manager/route-controller-manager-77d8544d9d-4s5nw" Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.961887 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/de542b67-94ed-458b-8665-8d4f03f7a0e8-proxy-ca-bundles\") pod \"controller-manager-74dc647949-ccv4m\" (UID: \"de542b67-94ed-458b-8665-8d4f03f7a0e8\") " pod="openshift-controller-manager/controller-manager-74dc647949-ccv4m" Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.962632 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4b8428c-ab24-46b9-b79f-53b39483aafa-config\") pod \"route-controller-manager-77d8544d9d-4s5nw\" (UID: \"a4b8428c-ab24-46b9-b79f-53b39483aafa\") " pod="openshift-route-controller-manager/route-controller-manager-77d8544d9d-4s5nw" Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.962673 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a4b8428c-ab24-46b9-b79f-53b39483aafa-client-ca\") pod \"route-controller-manager-77d8544d9d-4s5nw\" (UID: \"a4b8428c-ab24-46b9-b79f-53b39483aafa\") " pod="openshift-route-controller-manager/route-controller-manager-77d8544d9d-4s5nw" Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.972204 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a4b8428c-ab24-46b9-b79f-53b39483aafa-serving-cert\") pod \"route-controller-manager-77d8544d9d-4s5nw\" (UID: \"a4b8428c-ab24-46b9-b79f-53b39483aafa\") " pod="openshift-route-controller-manager/route-controller-manager-77d8544d9d-4s5nw" Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.977349 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kg7q9\" (UniqueName: \"kubernetes.io/projected/a4b8428c-ab24-46b9-b79f-53b39483aafa-kube-api-access-kg7q9\") pod \"route-controller-manager-77d8544d9d-4s5nw\" (UID: \"a4b8428c-ab24-46b9-b79f-53b39483aafa\") " pod="openshift-route-controller-manager/route-controller-manager-77d8544d9d-4s5nw" Jan 31 16:34:46 crc kubenswrapper[4769]: I0131 16:34:46.992448 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-77d8544d9d-4s5nw" Jan 31 16:34:47 crc kubenswrapper[4769]: I0131 16:34:47.062775 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/de542b67-94ed-458b-8665-8d4f03f7a0e8-proxy-ca-bundles\") pod \"controller-manager-74dc647949-ccv4m\" (UID: \"de542b67-94ed-458b-8665-8d4f03f7a0e8\") " pod="openshift-controller-manager/controller-manager-74dc647949-ccv4m" Jan 31 16:34:47 crc kubenswrapper[4769]: I0131 16:34:47.062822 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2rh2\" (UniqueName: \"kubernetes.io/projected/de542b67-94ed-458b-8665-8d4f03f7a0e8-kube-api-access-s2rh2\") pod \"controller-manager-74dc647949-ccv4m\" (UID: \"de542b67-94ed-458b-8665-8d4f03f7a0e8\") " pod="openshift-controller-manager/controller-manager-74dc647949-ccv4m" Jan 31 16:34:47 crc kubenswrapper[4769]: I0131 16:34:47.062854 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/de542b67-94ed-458b-8665-8d4f03f7a0e8-client-ca\") pod \"controller-manager-74dc647949-ccv4m\" (UID: \"de542b67-94ed-458b-8665-8d4f03f7a0e8\") " pod="openshift-controller-manager/controller-manager-74dc647949-ccv4m" Jan 31 16:34:47 crc kubenswrapper[4769]: I0131 16:34:47.062877 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/de542b67-94ed-458b-8665-8d4f03f7a0e8-config\") pod \"controller-manager-74dc647949-ccv4m\" (UID: \"de542b67-94ed-458b-8665-8d4f03f7a0e8\") " pod="openshift-controller-manager/controller-manager-74dc647949-ccv4m" Jan 31 16:34:47 crc kubenswrapper[4769]: I0131 16:34:47.063717 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/de542b67-94ed-458b-8665-8d4f03f7a0e8-client-ca\") pod \"controller-manager-74dc647949-ccv4m\" (UID: \"de542b67-94ed-458b-8665-8d4f03f7a0e8\") " pod="openshift-controller-manager/controller-manager-74dc647949-ccv4m" Jan 31 16:34:47 crc kubenswrapper[4769]: I0131 16:34:47.064005 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/de542b67-94ed-458b-8665-8d4f03f7a0e8-proxy-ca-bundles\") pod \"controller-manager-74dc647949-ccv4m\" (UID: \"de542b67-94ed-458b-8665-8d4f03f7a0e8\") " pod="openshift-controller-manager/controller-manager-74dc647949-ccv4m" Jan 31 16:34:47 crc kubenswrapper[4769]: I0131 16:34:47.064804 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/de542b67-94ed-458b-8665-8d4f03f7a0e8-config\") pod \"controller-manager-74dc647949-ccv4m\" (UID: \"de542b67-94ed-458b-8665-8d4f03f7a0e8\") " pod="openshift-controller-manager/controller-manager-74dc647949-ccv4m" Jan 31 16:34:47 crc kubenswrapper[4769]: I0131 16:34:47.064854 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/de542b67-94ed-458b-8665-8d4f03f7a0e8-serving-cert\") pod \"controller-manager-74dc647949-ccv4m\" (UID: \"de542b67-94ed-458b-8665-8d4f03f7a0e8\") " pod="openshift-controller-manager/controller-manager-74dc647949-ccv4m" Jan 31 16:34:47 crc kubenswrapper[4769]: I0131 16:34:47.073064 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/de542b67-94ed-458b-8665-8d4f03f7a0e8-serving-cert\") pod \"controller-manager-74dc647949-ccv4m\" (UID: \"de542b67-94ed-458b-8665-8d4f03f7a0e8\") " pod="openshift-controller-manager/controller-manager-74dc647949-ccv4m" Jan 31 16:34:47 crc kubenswrapper[4769]: I0131 16:34:47.084735 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2rh2\" (UniqueName: \"kubernetes.io/projected/de542b67-94ed-458b-8665-8d4f03f7a0e8-kube-api-access-s2rh2\") pod \"controller-manager-74dc647949-ccv4m\" (UID: \"de542b67-94ed-458b-8665-8d4f03f7a0e8\") " pod="openshift-controller-manager/controller-manager-74dc647949-ccv4m" Jan 31 16:34:47 crc kubenswrapper[4769]: I0131 16:34:47.109476 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-74dc647949-ccv4m" Jan 31 16:34:47 crc kubenswrapper[4769]: I0131 16:34:47.183257 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-77d8544d9d-4s5nw"] Jan 31 16:34:47 crc kubenswrapper[4769]: W0131 16:34:47.188336 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda4b8428c_ab24_46b9_b79f_53b39483aafa.slice/crio-678f2e0705f0c7798bff167b09e8ac688eec02224895994929655766479cfd3e WatchSource:0}: Error finding container 678f2e0705f0c7798bff167b09e8ac688eec02224895994929655766479cfd3e: Status 404 returned error can't find the container with id 678f2e0705f0c7798bff167b09e8ac688eec02224895994929655766479cfd3e Jan 31 16:34:47 crc kubenswrapper[4769]: I0131 16:34:47.577136 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-74dc647949-ccv4m"] Jan 31 16:34:47 crc kubenswrapper[4769]: W0131 16:34:47.577672 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podde542b67_94ed_458b_8665_8d4f03f7a0e8.slice/crio-70c973f03e539760d5cdf75b150f9043c65b5dedce983f3465617d118e0f9044 WatchSource:0}: Error finding container 70c973f03e539760d5cdf75b150f9043c65b5dedce983f3465617d118e0f9044: Status 404 returned error can't find the container with id 70c973f03e539760d5cdf75b150f9043c65b5dedce983f3465617d118e0f9044 Jan 31 16:34:47 crc kubenswrapper[4769]: I0131 16:34:47.737335 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-77d8544d9d-4s5nw" event={"ID":"a4b8428c-ab24-46b9-b79f-53b39483aafa","Type":"ContainerStarted","Data":"c3fc216741db312d09baeed56c8b2f47e41adfbb2fc7dba9e7dd56f2c95e8720"} Jan 31 16:34:47 crc kubenswrapper[4769]: I0131 16:34:47.737441 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-77d8544d9d-4s5nw" event={"ID":"a4b8428c-ab24-46b9-b79f-53b39483aafa","Type":"ContainerStarted","Data":"678f2e0705f0c7798bff167b09e8ac688eec02224895994929655766479cfd3e"} Jan 31 16:34:47 crc kubenswrapper[4769]: I0131 16:34:47.738856 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-74dc647949-ccv4m" event={"ID":"de542b67-94ed-458b-8665-8d4f03f7a0e8","Type":"ContainerStarted","Data":"c12619a9560dfc578a9c5d642a68711e0934897ee665939e718b68d4a437a039"} Jan 31 16:34:47 crc kubenswrapper[4769]: I0131 16:34:47.738907 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-controller-manager/controller-manager-74dc647949-ccv4m" event={"ID":"de542b67-94ed-458b-8665-8d4f03f7a0e8","Type":"ContainerStarted","Data":"70c973f03e539760d5cdf75b150f9043c65b5dedce983f3465617d118e0f9044"} Jan 31 16:34:47 crc kubenswrapper[4769]: I0131 16:34:47.739111 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-77d8544d9d-4s5nw" Jan 31 16:34:47 crc kubenswrapper[4769]: I0131 16:34:47.739348 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-74dc647949-ccv4m" Jan 31 16:34:47 crc kubenswrapper[4769]: I0131 16:34:47.741149 4769 patch_prober.go:28] interesting pod/controller-manager-74dc647949-ccv4m container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.58:8443/healthz\": dial tcp 10.217.0.58:8443: connect: connection refused" start-of-body= Jan 31 16:34:47 crc kubenswrapper[4769]: I0131 16:34:47.741302 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-74dc647949-ccv4m" podUID="de542b67-94ed-458b-8665-8d4f03f7a0e8" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.58:8443/healthz\": dial tcp 10.217.0.58:8443: connect: connection refused" Jan 31 16:34:47 crc kubenswrapper[4769]: I0131 16:34:47.785136 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-77d8544d9d-4s5nw" podStartSLOduration=1.785115005 podStartE2EDuration="1.785115005s" podCreationTimestamp="2026-01-31 16:34:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:34:47.766144863 +0000 UTC m=+335.840313532" watchObservedRunningTime="2026-01-31 16:34:47.785115005 +0000 UTC m=+335.859283664" Jan 31 16:34:47 crc kubenswrapper[4769]: I0131 16:34:47.787294 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-74dc647949-ccv4m" podStartSLOduration=2.787286905 podStartE2EDuration="2.787286905s" podCreationTimestamp="2026-01-31 16:34:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:34:47.782603046 +0000 UTC m=+335.856771725" watchObservedRunningTime="2026-01-31 16:34:47.787286905 +0000 UTC m=+335.861455574" Jan 31 16:34:47 crc kubenswrapper[4769]: I0131 16:34:47.828172 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-77d8544d9d-4s5nw" Jan 31 16:34:48 crc kubenswrapper[4769]: I0131 16:34:48.719353 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1efa9307-bdd7-4ec9-ab59-32196c343838" path="/var/lib/kubelet/pods/1efa9307-bdd7-4ec9-ab59-32196c343838/volumes" Jan 31 16:34:48 crc kubenswrapper[4769]: I0131 16:34:48.720241 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f1cfa945-9049-4fe1-bde7-549e1c5d0e39" path="/var/lib/kubelet/pods/f1cfa945-9049-4fe1-bde7-549e1c5d0e39/volumes" Jan 31 16:34:48 crc kubenswrapper[4769]: I0131 16:34:48.763765 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-74dc647949-ccv4m" Jan 31 16:34:58 crc 
kubenswrapper[4769]: I0131 16:34:58.523110 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-hvfw2"] Jan 31 16:34:58 crc kubenswrapper[4769]: I0131 16:34:58.524060 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-hvfw2" podUID="58ef25f8-8447-418b-a590-c964242d9336" containerName="registry-server" containerID="cri-o://c229c183188a9693441f9945a889ef7029a878f9a6edddb3877d89a13f076356" gracePeriod=30 Jan 31 16:34:58 crc kubenswrapper[4769]: I0131 16:34:58.538720 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-sn7bc"] Jan 31 16:34:58 crc kubenswrapper[4769]: I0131 16:34:58.538967 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-sn7bc" podUID="4e497cac-4dc0-4166-af7d-768713cd0bb8" containerName="registry-server" containerID="cri-o://9dd6ceda6bddc5468b99b4d2b8ba03d7f7278b555e9d16180173158a9b8cd8fe" gracePeriod=30 Jan 31 16:34:58 crc kubenswrapper[4769]: I0131 16:34:58.541511 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-59jw4"] Jan 31 16:34:58 crc kubenswrapper[4769]: I0131 16:34:58.541796 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-59jw4" podUID="c57a349c-3bb6-4a77-8a0a-59683f544d6d" containerName="marketplace-operator" containerID="cri-o://3ddacbe469aee19591bea55f769bcd54c2cdadb6c1feeab32fa1ce8320a01724" gracePeriod=30 Jan 31 16:34:58 crc kubenswrapper[4769]: I0131 16:34:58.553466 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-zllsb"] Jan 31 16:34:58 crc kubenswrapper[4769]: I0131 16:34:58.553730 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-zllsb" podUID="63e5ae27-836f-438f-905b-6bb3ffa507ef" containerName="registry-server" containerID="cri-o://b080b48d4cea3de1077389fff8580c5956559f80bf504972f898db6df0bc5b4f" gracePeriod=30 Jan 31 16:34:58 crc kubenswrapper[4769]: I0131 16:34:58.556244 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-47rlr"] Jan 31 16:34:58 crc kubenswrapper[4769]: I0131 16:34:58.558196 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-47rlr" Jan 31 16:34:58 crc kubenswrapper[4769]: I0131 16:34:58.562346 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-jtrpz"] Jan 31 16:34:58 crc kubenswrapper[4769]: I0131 16:34:58.565116 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-jtrpz" podUID="62c63967-4fa2-4a8c-a186-a58dd20f6228" containerName="registry-server" containerID="cri-o://772bf34e6a61026bf2601c955e6b208f0cd05de5337e2a7fb7fbd3c3fc186dc2" gracePeriod=30 Jan 31 16:34:58 crc kubenswrapper[4769]: I0131 16:34:58.570610 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-47rlr"] Jan 31 16:34:58 crc kubenswrapper[4769]: I0131 16:34:58.720582 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kclbz\" (UniqueName: \"kubernetes.io/projected/ab91e141-74c5-4c96-87fb-f0f1d41f7456-kube-api-access-kclbz\") pod \"marketplace-operator-79b997595-47rlr\" (UID: \"ab91e141-74c5-4c96-87fb-f0f1d41f7456\") " pod="openshift-marketplace/marketplace-operator-79b997595-47rlr" Jan 31 16:34:58 crc kubenswrapper[4769]: I0131 16:34:58.720652 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ab91e141-74c5-4c96-87fb-f0f1d41f7456-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-47rlr\" (UID: \"ab91e141-74c5-4c96-87fb-f0f1d41f7456\") " pod="openshift-marketplace/marketplace-operator-79b997595-47rlr" Jan 31 16:34:58 crc kubenswrapper[4769]: I0131 16:34:58.720724 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/ab91e141-74c5-4c96-87fb-f0f1d41f7456-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-47rlr\" (UID: \"ab91e141-74c5-4c96-87fb-f0f1d41f7456\") " pod="openshift-marketplace/marketplace-operator-79b997595-47rlr" Jan 31 16:34:58 crc kubenswrapper[4769]: I0131 16:34:58.821563 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/ab91e141-74c5-4c96-87fb-f0f1d41f7456-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-47rlr\" (UID: \"ab91e141-74c5-4c96-87fb-f0f1d41f7456\") " pod="openshift-marketplace/marketplace-operator-79b997595-47rlr" Jan 31 16:34:58 crc kubenswrapper[4769]: I0131 16:34:58.821646 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kclbz\" (UniqueName: \"kubernetes.io/projected/ab91e141-74c5-4c96-87fb-f0f1d41f7456-kube-api-access-kclbz\") pod \"marketplace-operator-79b997595-47rlr\" (UID: \"ab91e141-74c5-4c96-87fb-f0f1d41f7456\") " pod="openshift-marketplace/marketplace-operator-79b997595-47rlr" Jan 31 16:34:58 crc kubenswrapper[4769]: I0131 16:34:58.821673 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ab91e141-74c5-4c96-87fb-f0f1d41f7456-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-47rlr\" (UID: \"ab91e141-74c5-4c96-87fb-f0f1d41f7456\") " pod="openshift-marketplace/marketplace-operator-79b997595-47rlr" Jan 31 16:34:58 crc kubenswrapper[4769]: I0131 16:34:58.823169 4769 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ab91e141-74c5-4c96-87fb-f0f1d41f7456-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-47rlr\" (UID: \"ab91e141-74c5-4c96-87fb-f0f1d41f7456\") " pod="openshift-marketplace/marketplace-operator-79b997595-47rlr" Jan 31 16:34:58 crc kubenswrapper[4769]: I0131 16:34:58.827813 4769 generic.go:334] "Generic (PLEG): container finished" podID="58ef25f8-8447-418b-a590-c964242d9336" containerID="c229c183188a9693441f9945a889ef7029a878f9a6edddb3877d89a13f076356" exitCode=0 Jan 31 16:34:58 crc kubenswrapper[4769]: I0131 16:34:58.827899 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hvfw2" event={"ID":"58ef25f8-8447-418b-a590-c964242d9336","Type":"ContainerDied","Data":"c229c183188a9693441f9945a889ef7029a878f9a6edddb3877d89a13f076356"} Jan 31 16:34:58 crc kubenswrapper[4769]: I0131 16:34:58.829603 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/ab91e141-74c5-4c96-87fb-f0f1d41f7456-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-47rlr\" (UID: \"ab91e141-74c5-4c96-87fb-f0f1d41f7456\") " pod="openshift-marketplace/marketplace-operator-79b997595-47rlr" Jan 31 16:34:58 crc kubenswrapper[4769]: I0131 16:34:58.829876 4769 generic.go:334] "Generic (PLEG): container finished" podID="62c63967-4fa2-4a8c-a186-a58dd20f6228" containerID="772bf34e6a61026bf2601c955e6b208f0cd05de5337e2a7fb7fbd3c3fc186dc2" exitCode=0 Jan 31 16:34:58 crc kubenswrapper[4769]: I0131 16:34:58.829898 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jtrpz" event={"ID":"62c63967-4fa2-4a8c-a186-a58dd20f6228","Type":"ContainerDied","Data":"772bf34e6a61026bf2601c955e6b208f0cd05de5337e2a7fb7fbd3c3fc186dc2"} Jan 31 16:34:58 crc kubenswrapper[4769]: I0131 16:34:58.832422 4769 generic.go:334] "Generic (PLEG): container finished" podID="4e497cac-4dc0-4166-af7d-768713cd0bb8" containerID="9dd6ceda6bddc5468b99b4d2b8ba03d7f7278b555e9d16180173158a9b8cd8fe" exitCode=0 Jan 31 16:34:58 crc kubenswrapper[4769]: I0131 16:34:58.832512 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sn7bc" event={"ID":"4e497cac-4dc0-4166-af7d-768713cd0bb8","Type":"ContainerDied","Data":"9dd6ceda6bddc5468b99b4d2b8ba03d7f7278b555e9d16180173158a9b8cd8fe"} Jan 31 16:34:58 crc kubenswrapper[4769]: I0131 16:34:58.834440 4769 generic.go:334] "Generic (PLEG): container finished" podID="c57a349c-3bb6-4a77-8a0a-59683f544d6d" containerID="3ddacbe469aee19591bea55f769bcd54c2cdadb6c1feeab32fa1ce8320a01724" exitCode=0 Jan 31 16:34:58 crc kubenswrapper[4769]: I0131 16:34:58.834484 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-59jw4" event={"ID":"c57a349c-3bb6-4a77-8a0a-59683f544d6d","Type":"ContainerDied","Data":"3ddacbe469aee19591bea55f769bcd54c2cdadb6c1feeab32fa1ce8320a01724"} Jan 31 16:34:58 crc kubenswrapper[4769]: I0131 16:34:58.837468 4769 generic.go:334] "Generic (PLEG): container finished" podID="63e5ae27-836f-438f-905b-6bb3ffa507ef" containerID="b080b48d4cea3de1077389fff8580c5956559f80bf504972f898db6df0bc5b4f" exitCode=0 Jan 31 16:34:58 crc kubenswrapper[4769]: I0131 16:34:58.837523 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zllsb" 
event={"ID":"63e5ae27-836f-438f-905b-6bb3ffa507ef","Type":"ContainerDied","Data":"b080b48d4cea3de1077389fff8580c5956559f80bf504972f898db6df0bc5b4f"} Jan 31 16:34:58 crc kubenswrapper[4769]: I0131 16:34:58.838341 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kclbz\" (UniqueName: \"kubernetes.io/projected/ab91e141-74c5-4c96-87fb-f0f1d41f7456-kube-api-access-kclbz\") pod \"marketplace-operator-79b997595-47rlr\" (UID: \"ab91e141-74c5-4c96-87fb-f0f1d41f7456\") " pod="openshift-marketplace/marketplace-operator-79b997595-47rlr" Jan 31 16:34:58 crc kubenswrapper[4769]: I0131 16:34:58.976683 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-47rlr" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.067191 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-sn7bc" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.227079 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e497cac-4dc0-4166-af7d-768713cd0bb8-catalog-content\") pod \"4e497cac-4dc0-4166-af7d-768713cd0bb8\" (UID: \"4e497cac-4dc0-4166-af7d-768713cd0bb8\") " Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.227371 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6w274\" (UniqueName: \"kubernetes.io/projected/4e497cac-4dc0-4166-af7d-768713cd0bb8-kube-api-access-6w274\") pod \"4e497cac-4dc0-4166-af7d-768713cd0bb8\" (UID: \"4e497cac-4dc0-4166-af7d-768713cd0bb8\") " Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.227576 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e497cac-4dc0-4166-af7d-768713cd0bb8-utilities\") pod \"4e497cac-4dc0-4166-af7d-768713cd0bb8\" (UID: \"4e497cac-4dc0-4166-af7d-768713cd0bb8\") " Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.230356 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4e497cac-4dc0-4166-af7d-768713cd0bb8-utilities" (OuterVolumeSpecName: "utilities") pod "4e497cac-4dc0-4166-af7d-768713cd0bb8" (UID: "4e497cac-4dc0-4166-af7d-768713cd0bb8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.232608 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4e497cac-4dc0-4166-af7d-768713cd0bb8-kube-api-access-6w274" (OuterVolumeSpecName: "kube-api-access-6w274") pod "4e497cac-4dc0-4166-af7d-768713cd0bb8" (UID: "4e497cac-4dc0-4166-af7d-768713cd0bb8"). InnerVolumeSpecName "kube-api-access-6w274". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.266726 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hvfw2" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.274821 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jtrpz" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.282683 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-59jw4" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.285318 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zllsb" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.299068 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4e497cac-4dc0-4166-af7d-768713cd0bb8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4e497cac-4dc0-4166-af7d-768713cd0bb8" (UID: "4e497cac-4dc0-4166-af7d-768713cd0bb8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.329187 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6w274\" (UniqueName: \"kubernetes.io/projected/4e497cac-4dc0-4166-af7d-768713cd0bb8-kube-api-access-6w274\") on node \"crc\" DevicePath \"\"" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.329226 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e497cac-4dc0-4166-af7d-768713cd0bb8-utilities\") on node \"crc\" DevicePath \"\"" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.329241 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e497cac-4dc0-4166-af7d-768713cd0bb8-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.429839 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/c57a349c-3bb6-4a77-8a0a-59683f544d6d-marketplace-operator-metrics\") pod \"c57a349c-3bb6-4a77-8a0a-59683f544d6d\" (UID: \"c57a349c-3bb6-4a77-8a0a-59683f544d6d\") " Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.429909 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58ef25f8-8447-418b-a590-c964242d9336-catalog-content\") pod \"58ef25f8-8447-418b-a590-c964242d9336\" (UID: \"58ef25f8-8447-418b-a590-c964242d9336\") " Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.429939 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wbghb\" (UniqueName: \"kubernetes.io/projected/63e5ae27-836f-438f-905b-6bb3ffa507ef-kube-api-access-wbghb\") pod \"63e5ae27-836f-438f-905b-6bb3ffa507ef\" (UID: \"63e5ae27-836f-438f-905b-6bb3ffa507ef\") " Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.429976 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5cqvh\" (UniqueName: \"kubernetes.io/projected/c57a349c-3bb6-4a77-8a0a-59683f544d6d-kube-api-access-5cqvh\") pod \"c57a349c-3bb6-4a77-8a0a-59683f544d6d\" (UID: \"c57a349c-3bb6-4a77-8a0a-59683f544d6d\") " Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.430005 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/62c63967-4fa2-4a8c-a186-a58dd20f6228-catalog-content\") pod \"62c63967-4fa2-4a8c-a186-a58dd20f6228\" (UID: \"62c63967-4fa2-4a8c-a186-a58dd20f6228\") " Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.430038 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/63e5ae27-836f-438f-905b-6bb3ffa507ef-utilities\") pod \"63e5ae27-836f-438f-905b-6bb3ffa507ef\" (UID: \"63e5ae27-836f-438f-905b-6bb3ffa507ef\") " Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.430074 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tsj2r\" (UniqueName: \"kubernetes.io/projected/62c63967-4fa2-4a8c-a186-a58dd20f6228-kube-api-access-tsj2r\") pod \"62c63967-4fa2-4a8c-a186-a58dd20f6228\" (UID: \"62c63967-4fa2-4a8c-a186-a58dd20f6228\") " Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.430111 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/62c63967-4fa2-4a8c-a186-a58dd20f6228-utilities\") pod \"62c63967-4fa2-4a8c-a186-a58dd20f6228\" (UID: \"62c63967-4fa2-4a8c-a186-a58dd20f6228\") " Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.430148 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63e5ae27-836f-438f-905b-6bb3ffa507ef-catalog-content\") pod \"63e5ae27-836f-438f-905b-6bb3ffa507ef\" (UID: \"63e5ae27-836f-438f-905b-6bb3ffa507ef\") " Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.430170 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zlv5m\" (UniqueName: \"kubernetes.io/projected/58ef25f8-8447-418b-a590-c964242d9336-kube-api-access-zlv5m\") pod \"58ef25f8-8447-418b-a590-c964242d9336\" (UID: \"58ef25f8-8447-418b-a590-c964242d9336\") " Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.430201 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c57a349c-3bb6-4a77-8a0a-59683f544d6d-marketplace-trusted-ca\") pod \"c57a349c-3bb6-4a77-8a0a-59683f544d6d\" (UID: \"c57a349c-3bb6-4a77-8a0a-59683f544d6d\") " Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.430221 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58ef25f8-8447-418b-a590-c964242d9336-utilities\") pod \"58ef25f8-8447-418b-a590-c964242d9336\" (UID: \"58ef25f8-8447-418b-a590-c964242d9336\") " Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.431218 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/63e5ae27-836f-438f-905b-6bb3ffa507ef-utilities" (OuterVolumeSpecName: "utilities") pod "63e5ae27-836f-438f-905b-6bb3ffa507ef" (UID: "63e5ae27-836f-438f-905b-6bb3ffa507ef"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.431283 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/58ef25f8-8447-418b-a590-c964242d9336-utilities" (OuterVolumeSpecName: "utilities") pod "58ef25f8-8447-418b-a590-c964242d9336" (UID: "58ef25f8-8447-418b-a590-c964242d9336"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.431956 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c57a349c-3bb6-4a77-8a0a-59683f544d6d-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "c57a349c-3bb6-4a77-8a0a-59683f544d6d" (UID: "c57a349c-3bb6-4a77-8a0a-59683f544d6d"). 
InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.432228 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/62c63967-4fa2-4a8c-a186-a58dd20f6228-utilities" (OuterVolumeSpecName: "utilities") pod "62c63967-4fa2-4a8c-a186-a58dd20f6228" (UID: "62c63967-4fa2-4a8c-a186-a58dd20f6228"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.432934 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/63e5ae27-836f-438f-905b-6bb3ffa507ef-kube-api-access-wbghb" (OuterVolumeSpecName: "kube-api-access-wbghb") pod "63e5ae27-836f-438f-905b-6bb3ffa507ef" (UID: "63e5ae27-836f-438f-905b-6bb3ffa507ef"). InnerVolumeSpecName "kube-api-access-wbghb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.433170 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/58ef25f8-8447-418b-a590-c964242d9336-kube-api-access-zlv5m" (OuterVolumeSpecName: "kube-api-access-zlv5m") pod "58ef25f8-8447-418b-a590-c964242d9336" (UID: "58ef25f8-8447-418b-a590-c964242d9336"). InnerVolumeSpecName "kube-api-access-zlv5m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.433436 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/62c63967-4fa2-4a8c-a186-a58dd20f6228-kube-api-access-tsj2r" (OuterVolumeSpecName: "kube-api-access-tsj2r") pod "62c63967-4fa2-4a8c-a186-a58dd20f6228" (UID: "62c63967-4fa2-4a8c-a186-a58dd20f6228"). InnerVolumeSpecName "kube-api-access-tsj2r". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.434993 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c57a349c-3bb6-4a77-8a0a-59683f544d6d-kube-api-access-5cqvh" (OuterVolumeSpecName: "kube-api-access-5cqvh") pod "c57a349c-3bb6-4a77-8a0a-59683f544d6d" (UID: "c57a349c-3bb6-4a77-8a0a-59683f544d6d"). InnerVolumeSpecName "kube-api-access-5cqvh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.453053 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c57a349c-3bb6-4a77-8a0a-59683f544d6d-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "c57a349c-3bb6-4a77-8a0a-59683f544d6d" (UID: "c57a349c-3bb6-4a77-8a0a-59683f544d6d"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.470086 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/63e5ae27-836f-438f-905b-6bb3ffa507ef-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "63e5ae27-836f-438f-905b-6bb3ffa507ef" (UID: "63e5ae27-836f-438f-905b-6bb3ffa507ef"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.474406 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/58ef25f8-8447-418b-a590-c964242d9336-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "58ef25f8-8447-418b-a590-c964242d9336" (UID: "58ef25f8-8447-418b-a590-c964242d9336"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.531480 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tsj2r\" (UniqueName: \"kubernetes.io/projected/62c63967-4fa2-4a8c-a186-a58dd20f6228-kube-api-access-tsj2r\") on node \"crc\" DevicePath \"\"" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.531532 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/62c63967-4fa2-4a8c-a186-a58dd20f6228-utilities\") on node \"crc\" DevicePath \"\"" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.531549 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63e5ae27-836f-438f-905b-6bb3ffa507ef-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.531561 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zlv5m\" (UniqueName: \"kubernetes.io/projected/58ef25f8-8447-418b-a590-c964242d9336-kube-api-access-zlv5m\") on node \"crc\" DevicePath \"\"" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.531572 4769 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c57a349c-3bb6-4a77-8a0a-59683f544d6d-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.531582 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58ef25f8-8447-418b-a590-c964242d9336-utilities\") on node \"crc\" DevicePath \"\"" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.531595 4769 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/c57a349c-3bb6-4a77-8a0a-59683f544d6d-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.531606 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58ef25f8-8447-418b-a590-c964242d9336-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.531617 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wbghb\" (UniqueName: \"kubernetes.io/projected/63e5ae27-836f-438f-905b-6bb3ffa507ef-kube-api-access-wbghb\") on node \"crc\" DevicePath \"\"" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.531625 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5cqvh\" (UniqueName: \"kubernetes.io/projected/c57a349c-3bb6-4a77-8a0a-59683f544d6d-kube-api-access-5cqvh\") on node \"crc\" DevicePath \"\"" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.531633 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63e5ae27-836f-438f-905b-6bb3ffa507ef-utilities\") on node \"crc\" DevicePath \"\"" Jan 31 16:34:59 crc 
kubenswrapper[4769]: I0131 16:34:59.566539 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-47rlr"] Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.593334 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/62c63967-4fa2-4a8c-a186-a58dd20f6228-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "62c63967-4fa2-4a8c-a186-a58dd20f6228" (UID: "62c63967-4fa2-4a8c-a186-a58dd20f6228"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.632986 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/62c63967-4fa2-4a8c-a186-a58dd20f6228-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.842760 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-59jw4" event={"ID":"c57a349c-3bb6-4a77-8a0a-59683f544d6d","Type":"ContainerDied","Data":"3bf21dbf0e92471ae1c18867e9ee367a366a8fd06409e255c439ce3631d260cd"} Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.842809 4769 scope.go:117] "RemoveContainer" containerID="3ddacbe469aee19591bea55f769bcd54c2cdadb6c1feeab32fa1ce8320a01724" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.843106 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-59jw4" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.848693 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zllsb" event={"ID":"63e5ae27-836f-438f-905b-6bb3ffa507ef","Type":"ContainerDied","Data":"52b318f6c90ee36fa59e0713641dcaf8dd9831b24397b003c589632902f1bca2"} Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.848718 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zllsb" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.854288 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hvfw2" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.856246 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-jtrpz" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.856662 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hvfw2" event={"ID":"58ef25f8-8447-418b-a590-c964242d9336","Type":"ContainerDied","Data":"3e18f71a17573c51705aae1d0853367401f57b0a486cfb7b0bfe6944bc64afcb"} Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.856701 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jtrpz" event={"ID":"62c63967-4fa2-4a8c-a186-a58dd20f6228","Type":"ContainerDied","Data":"5d55b959707d5f9346d943e79101198c25bf417f7506cbaf36d784974b3a379f"} Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.859173 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-47rlr" event={"ID":"ab91e141-74c5-4c96-87fb-f0f1d41f7456","Type":"ContainerStarted","Data":"bdeba9fe8c584ab383445e41a06cf02cc20d04a18d0bce0d62d2e7cf49e83639"} Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.859239 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-47rlr" event={"ID":"ab91e141-74c5-4c96-87fb-f0f1d41f7456","Type":"ContainerStarted","Data":"9cb93205a7d6557da25fa2f69b40fd9ad93f497248ee50e86cbad088d9bc8d0f"} Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.859451 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-47rlr" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.860916 4769 scope.go:117] "RemoveContainer" containerID="b080b48d4cea3de1077389fff8580c5956559f80bf504972f898db6df0bc5b4f" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.860800 4769 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-47rlr container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.60:8080/healthz\": dial tcp 10.217.0.60:8080: connect: connection refused" start-of-body= Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.861206 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-47rlr" podUID="ab91e141-74c5-4c96-87fb-f0f1d41f7456" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.60:8080/healthz\": dial tcp 10.217.0.60:8080: connect: connection refused" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.862814 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sn7bc" event={"ID":"4e497cac-4dc0-4166-af7d-768713cd0bb8","Type":"ContainerDied","Data":"68ffef9f6a96c7c77ed78ddbce70e788b9f1c1005b22e03a4c89519a5fcd1c41"} Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.862907 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-sn7bc" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.877140 4769 scope.go:117] "RemoveContainer" containerID="271d7fe309ae73e30505a9c6adcaf92863d74e2782a86bdf1ecb68d6db9f97ef" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.888446 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-47rlr" podStartSLOduration=1.888393283 podStartE2EDuration="1.888393283s" podCreationTimestamp="2026-01-31 16:34:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:34:59.886651695 +0000 UTC m=+347.960820384" watchObservedRunningTime="2026-01-31 16:34:59.888393283 +0000 UTC m=+347.962561952" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.906748 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-jtrpz"] Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.913799 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-jtrpz"] Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.921807 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-hvfw2"] Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.922345 4769 scope.go:117] "RemoveContainer" containerID="988a84bc08815ba425456e1c9fc634d886a26c5ccacc0aa6417058a1ad0230a7" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.928616 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-hvfw2"] Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.933454 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-59jw4"] Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.939159 4769 scope.go:117] "RemoveContainer" containerID="c229c183188a9693441f9945a889ef7029a878f9a6edddb3877d89a13f076356" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.943721 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-59jw4"] Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.974803 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-sn7bc"] Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.975348 4769 scope.go:117] "RemoveContainer" containerID="907bc524fa869f0b09d048bea65a6307a83f12f910411d06be63544e043d9f32" Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.986888 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-sn7bc"] Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.990332 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-zllsb"] Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.995727 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-zllsb"] Jan 31 16:34:59 crc kubenswrapper[4769]: I0131 16:34:59.997758 4769 scope.go:117] "RemoveContainer" containerID="c8498f9fe8a1dd2db3064cba3e342011563eddf5d78c3da3dedabf9870a1705a" Jan 31 16:35:00 crc kubenswrapper[4769]: I0131 16:35:00.020362 4769 scope.go:117] "RemoveContainer" containerID="772bf34e6a61026bf2601c955e6b208f0cd05de5337e2a7fb7fbd3c3fc186dc2" Jan 31 16:35:00 crc kubenswrapper[4769]: I0131 16:35:00.032331 4769 
scope.go:117] "RemoveContainer" containerID="7f92c48312161019f297597f64ffaf4ab8abe986756d7c0e75e77c8dbfd1a966" Jan 31 16:35:00 crc kubenswrapper[4769]: I0131 16:35:00.059170 4769 scope.go:117] "RemoveContainer" containerID="7480eb8d1802c519bb94c5fc45449127eace4d8aa457d56118ca1c47807b1eba" Jan 31 16:35:00 crc kubenswrapper[4769]: I0131 16:35:00.074923 4769 scope.go:117] "RemoveContainer" containerID="9dd6ceda6bddc5468b99b4d2b8ba03d7f7278b555e9d16180173158a9b8cd8fe" Jan 31 16:35:00 crc kubenswrapper[4769]: I0131 16:35:00.088839 4769 scope.go:117] "RemoveContainer" containerID="b9ff7ef50eaadf11a0c7271b56e557988bbc5590657ecea8b53691ae7998e5a8" Jan 31 16:35:00 crc kubenswrapper[4769]: I0131 16:35:00.102659 4769 scope.go:117] "RemoveContainer" containerID="28e91a17df67da6449edb19013a9768bd36766090be428eed778c42b4af19b87" Jan 31 16:35:00 crc kubenswrapper[4769]: I0131 16:35:00.714926 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4e497cac-4dc0-4166-af7d-768713cd0bb8" path="/var/lib/kubelet/pods/4e497cac-4dc0-4166-af7d-768713cd0bb8/volumes" Jan 31 16:35:00 crc kubenswrapper[4769]: I0131 16:35:00.716074 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="58ef25f8-8447-418b-a590-c964242d9336" path="/var/lib/kubelet/pods/58ef25f8-8447-418b-a590-c964242d9336/volumes" Jan 31 16:35:00 crc kubenswrapper[4769]: I0131 16:35:00.716809 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="62c63967-4fa2-4a8c-a186-a58dd20f6228" path="/var/lib/kubelet/pods/62c63967-4fa2-4a8c-a186-a58dd20f6228/volumes" Jan 31 16:35:00 crc kubenswrapper[4769]: I0131 16:35:00.717916 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="63e5ae27-836f-438f-905b-6bb3ffa507ef" path="/var/lib/kubelet/pods/63e5ae27-836f-438f-905b-6bb3ffa507ef/volumes" Jan 31 16:35:00 crc kubenswrapper[4769]: I0131 16:35:00.718657 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c57a349c-3bb6-4a77-8a0a-59683f544d6d" path="/var/lib/kubelet/pods/c57a349c-3bb6-4a77-8a0a-59683f544d6d/volumes" Jan 31 16:35:00 crc kubenswrapper[4769]: I0131 16:35:00.734653 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-kkrd9"] Jan 31 16:35:00 crc kubenswrapper[4769]: E0131 16:35:00.736166 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e497cac-4dc0-4166-af7d-768713cd0bb8" containerName="registry-server" Jan 31 16:35:00 crc kubenswrapper[4769]: I0131 16:35:00.736190 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e497cac-4dc0-4166-af7d-768713cd0bb8" containerName="registry-server" Jan 31 16:35:00 crc kubenswrapper[4769]: E0131 16:35:00.736203 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62c63967-4fa2-4a8c-a186-a58dd20f6228" containerName="extract-content" Jan 31 16:35:00 crc kubenswrapper[4769]: I0131 16:35:00.736213 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="62c63967-4fa2-4a8c-a186-a58dd20f6228" containerName="extract-content" Jan 31 16:35:00 crc kubenswrapper[4769]: E0131 16:35:00.736229 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58ef25f8-8447-418b-a590-c964242d9336" containerName="registry-server" Jan 31 16:35:00 crc kubenswrapper[4769]: I0131 16:35:00.736237 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="58ef25f8-8447-418b-a590-c964242d9336" containerName="registry-server" Jan 31 16:35:00 crc kubenswrapper[4769]: E0131 16:35:00.736250 4769 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="63e5ae27-836f-438f-905b-6bb3ffa507ef" containerName="registry-server" Jan 31 16:35:00 crc kubenswrapper[4769]: I0131 16:35:00.736257 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="63e5ae27-836f-438f-905b-6bb3ffa507ef" containerName="registry-server" Jan 31 16:35:00 crc kubenswrapper[4769]: E0131 16:35:00.736270 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e497cac-4dc0-4166-af7d-768713cd0bb8" containerName="extract-utilities" Jan 31 16:35:00 crc kubenswrapper[4769]: I0131 16:35:00.736278 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e497cac-4dc0-4166-af7d-768713cd0bb8" containerName="extract-utilities" Jan 31 16:35:00 crc kubenswrapper[4769]: E0131 16:35:00.736286 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e497cac-4dc0-4166-af7d-768713cd0bb8" containerName="extract-content" Jan 31 16:35:00 crc kubenswrapper[4769]: I0131 16:35:00.736293 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e497cac-4dc0-4166-af7d-768713cd0bb8" containerName="extract-content" Jan 31 16:35:00 crc kubenswrapper[4769]: E0131 16:35:00.736304 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62c63967-4fa2-4a8c-a186-a58dd20f6228" containerName="registry-server" Jan 31 16:35:00 crc kubenswrapper[4769]: I0131 16:35:00.736313 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="62c63967-4fa2-4a8c-a186-a58dd20f6228" containerName="registry-server" Jan 31 16:35:00 crc kubenswrapper[4769]: E0131 16:35:00.736323 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63e5ae27-836f-438f-905b-6bb3ffa507ef" containerName="extract-content" Jan 31 16:35:00 crc kubenswrapper[4769]: I0131 16:35:00.736331 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="63e5ae27-836f-438f-905b-6bb3ffa507ef" containerName="extract-content" Jan 31 16:35:00 crc kubenswrapper[4769]: E0131 16:35:00.736342 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58ef25f8-8447-418b-a590-c964242d9336" containerName="extract-content" Jan 31 16:35:00 crc kubenswrapper[4769]: I0131 16:35:00.736350 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="58ef25f8-8447-418b-a590-c964242d9336" containerName="extract-content" Jan 31 16:35:00 crc kubenswrapper[4769]: E0131 16:35:00.736361 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63e5ae27-836f-438f-905b-6bb3ffa507ef" containerName="extract-utilities" Jan 31 16:35:00 crc kubenswrapper[4769]: I0131 16:35:00.736369 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="63e5ae27-836f-438f-905b-6bb3ffa507ef" containerName="extract-utilities" Jan 31 16:35:00 crc kubenswrapper[4769]: E0131 16:35:00.736381 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c57a349c-3bb6-4a77-8a0a-59683f544d6d" containerName="marketplace-operator" Jan 31 16:35:00 crc kubenswrapper[4769]: I0131 16:35:00.736388 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="c57a349c-3bb6-4a77-8a0a-59683f544d6d" containerName="marketplace-operator" Jan 31 16:35:00 crc kubenswrapper[4769]: E0131 16:35:00.736396 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62c63967-4fa2-4a8c-a186-a58dd20f6228" containerName="extract-utilities" Jan 31 16:35:00 crc kubenswrapper[4769]: I0131 16:35:00.736403 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="62c63967-4fa2-4a8c-a186-a58dd20f6228" containerName="extract-utilities" Jan 31 16:35:00 crc kubenswrapper[4769]: E0131 16:35:00.736416 4769 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58ef25f8-8447-418b-a590-c964242d9336" containerName="extract-utilities" Jan 31 16:35:00 crc kubenswrapper[4769]: I0131 16:35:00.736423 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="58ef25f8-8447-418b-a590-c964242d9336" containerName="extract-utilities" Jan 31 16:35:00 crc kubenswrapper[4769]: I0131 16:35:00.736569 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="58ef25f8-8447-418b-a590-c964242d9336" containerName="registry-server" Jan 31 16:35:00 crc kubenswrapper[4769]: I0131 16:35:00.736582 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="c57a349c-3bb6-4a77-8a0a-59683f544d6d" containerName="marketplace-operator" Jan 31 16:35:00 crc kubenswrapper[4769]: I0131 16:35:00.736596 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="63e5ae27-836f-438f-905b-6bb3ffa507ef" containerName="registry-server" Jan 31 16:35:00 crc kubenswrapper[4769]: I0131 16:35:00.736606 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="62c63967-4fa2-4a8c-a186-a58dd20f6228" containerName="registry-server" Jan 31 16:35:00 crc kubenswrapper[4769]: I0131 16:35:00.736617 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="4e497cac-4dc0-4166-af7d-768713cd0bb8" containerName="registry-server" Jan 31 16:35:00 crc kubenswrapper[4769]: I0131 16:35:00.737281 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kkrd9" Jan 31 16:35:00 crc kubenswrapper[4769]: I0131 16:35:00.739251 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 31 16:35:00 crc kubenswrapper[4769]: I0131 16:35:00.740609 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-kkrd9"] Jan 31 16:35:00 crc kubenswrapper[4769]: I0131 16:35:00.875577 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-47rlr" Jan 31 16:35:00 crc kubenswrapper[4769]: I0131 16:35:00.889200 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b8c1fd17-896d-4f5d-b8c5-378a95dceb35-utilities\") pod \"redhat-marketplace-kkrd9\" (UID: \"b8c1fd17-896d-4f5d-b8c5-378a95dceb35\") " pod="openshift-marketplace/redhat-marketplace-kkrd9" Jan 31 16:35:00 crc kubenswrapper[4769]: I0131 16:35:00.889400 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4dvxj\" (UniqueName: \"kubernetes.io/projected/b8c1fd17-896d-4f5d-b8c5-378a95dceb35-kube-api-access-4dvxj\") pod \"redhat-marketplace-kkrd9\" (UID: \"b8c1fd17-896d-4f5d-b8c5-378a95dceb35\") " pod="openshift-marketplace/redhat-marketplace-kkrd9" Jan 31 16:35:00 crc kubenswrapper[4769]: I0131 16:35:00.889575 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b8c1fd17-896d-4f5d-b8c5-378a95dceb35-catalog-content\") pod \"redhat-marketplace-kkrd9\" (UID: \"b8c1fd17-896d-4f5d-b8c5-378a95dceb35\") " pod="openshift-marketplace/redhat-marketplace-kkrd9" Jan 31 16:35:00 crc kubenswrapper[4769]: I0131 16:35:00.937393 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-scx7g"] Jan 31 16:35:00 crc 
kubenswrapper[4769]: I0131 16:35:00.938529 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-scx7g" Jan 31 16:35:00 crc kubenswrapper[4769]: I0131 16:35:00.940188 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 31 16:35:00 crc kubenswrapper[4769]: I0131 16:35:00.951220 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-scx7g"] Jan 31 16:35:00 crc kubenswrapper[4769]: I0131 16:35:00.990607 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b8c1fd17-896d-4f5d-b8c5-378a95dceb35-utilities\") pod \"redhat-marketplace-kkrd9\" (UID: \"b8c1fd17-896d-4f5d-b8c5-378a95dceb35\") " pod="openshift-marketplace/redhat-marketplace-kkrd9" Jan 31 16:35:00 crc kubenswrapper[4769]: I0131 16:35:00.990658 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4dvxj\" (UniqueName: \"kubernetes.io/projected/b8c1fd17-896d-4f5d-b8c5-378a95dceb35-kube-api-access-4dvxj\") pod \"redhat-marketplace-kkrd9\" (UID: \"b8c1fd17-896d-4f5d-b8c5-378a95dceb35\") " pod="openshift-marketplace/redhat-marketplace-kkrd9" Jan 31 16:35:00 crc kubenswrapper[4769]: I0131 16:35:00.990751 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b8c1fd17-896d-4f5d-b8c5-378a95dceb35-catalog-content\") pod \"redhat-marketplace-kkrd9\" (UID: \"b8c1fd17-896d-4f5d-b8c5-378a95dceb35\") " pod="openshift-marketplace/redhat-marketplace-kkrd9" Jan 31 16:35:00 crc kubenswrapper[4769]: I0131 16:35:00.991169 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b8c1fd17-896d-4f5d-b8c5-378a95dceb35-utilities\") pod \"redhat-marketplace-kkrd9\" (UID: \"b8c1fd17-896d-4f5d-b8c5-378a95dceb35\") " pod="openshift-marketplace/redhat-marketplace-kkrd9" Jan 31 16:35:00 crc kubenswrapper[4769]: I0131 16:35:00.991466 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b8c1fd17-896d-4f5d-b8c5-378a95dceb35-catalog-content\") pod \"redhat-marketplace-kkrd9\" (UID: \"b8c1fd17-896d-4f5d-b8c5-378a95dceb35\") " pod="openshift-marketplace/redhat-marketplace-kkrd9" Jan 31 16:35:01 crc kubenswrapper[4769]: I0131 16:35:01.008728 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4dvxj\" (UniqueName: \"kubernetes.io/projected/b8c1fd17-896d-4f5d-b8c5-378a95dceb35-kube-api-access-4dvxj\") pod \"redhat-marketplace-kkrd9\" (UID: \"b8c1fd17-896d-4f5d-b8c5-378a95dceb35\") " pod="openshift-marketplace/redhat-marketplace-kkrd9" Jan 31 16:35:01 crc kubenswrapper[4769]: I0131 16:35:01.092315 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a488f9b4-4988-4cc1-8085-1c4410a2aa7b-catalog-content\") pod \"redhat-operators-scx7g\" (UID: \"a488f9b4-4988-4cc1-8085-1c4410a2aa7b\") " pod="openshift-marketplace/redhat-operators-scx7g" Jan 31 16:35:01 crc kubenswrapper[4769]: I0131 16:35:01.092353 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jmswp\" (UniqueName: 
\"kubernetes.io/projected/a488f9b4-4988-4cc1-8085-1c4410a2aa7b-kube-api-access-jmswp\") pod \"redhat-operators-scx7g\" (UID: \"a488f9b4-4988-4cc1-8085-1c4410a2aa7b\") " pod="openshift-marketplace/redhat-operators-scx7g" Jan 31 16:35:01 crc kubenswrapper[4769]: I0131 16:35:01.092481 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a488f9b4-4988-4cc1-8085-1c4410a2aa7b-utilities\") pod \"redhat-operators-scx7g\" (UID: \"a488f9b4-4988-4cc1-8085-1c4410a2aa7b\") " pod="openshift-marketplace/redhat-operators-scx7g" Jan 31 16:35:01 crc kubenswrapper[4769]: I0131 16:35:01.103176 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kkrd9" Jan 31 16:35:01 crc kubenswrapper[4769]: I0131 16:35:01.193181 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a488f9b4-4988-4cc1-8085-1c4410a2aa7b-catalog-content\") pod \"redhat-operators-scx7g\" (UID: \"a488f9b4-4988-4cc1-8085-1c4410a2aa7b\") " pod="openshift-marketplace/redhat-operators-scx7g" Jan 31 16:35:01 crc kubenswrapper[4769]: I0131 16:35:01.193214 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jmswp\" (UniqueName: \"kubernetes.io/projected/a488f9b4-4988-4cc1-8085-1c4410a2aa7b-kube-api-access-jmswp\") pod \"redhat-operators-scx7g\" (UID: \"a488f9b4-4988-4cc1-8085-1c4410a2aa7b\") " pod="openshift-marketplace/redhat-operators-scx7g" Jan 31 16:35:01 crc kubenswrapper[4769]: I0131 16:35:01.193239 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a488f9b4-4988-4cc1-8085-1c4410a2aa7b-utilities\") pod \"redhat-operators-scx7g\" (UID: \"a488f9b4-4988-4cc1-8085-1c4410a2aa7b\") " pod="openshift-marketplace/redhat-operators-scx7g" Jan 31 16:35:01 crc kubenswrapper[4769]: I0131 16:35:01.193649 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a488f9b4-4988-4cc1-8085-1c4410a2aa7b-utilities\") pod \"redhat-operators-scx7g\" (UID: \"a488f9b4-4988-4cc1-8085-1c4410a2aa7b\") " pod="openshift-marketplace/redhat-operators-scx7g" Jan 31 16:35:01 crc kubenswrapper[4769]: I0131 16:35:01.193753 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a488f9b4-4988-4cc1-8085-1c4410a2aa7b-catalog-content\") pod \"redhat-operators-scx7g\" (UID: \"a488f9b4-4988-4cc1-8085-1c4410a2aa7b\") " pod="openshift-marketplace/redhat-operators-scx7g" Jan 31 16:35:01 crc kubenswrapper[4769]: I0131 16:35:01.212617 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jmswp\" (UniqueName: \"kubernetes.io/projected/a488f9b4-4988-4cc1-8085-1c4410a2aa7b-kube-api-access-jmswp\") pod \"redhat-operators-scx7g\" (UID: \"a488f9b4-4988-4cc1-8085-1c4410a2aa7b\") " pod="openshift-marketplace/redhat-operators-scx7g" Jan 31 16:35:01 crc kubenswrapper[4769]: I0131 16:35:01.255964 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-scx7g" Jan 31 16:35:01 crc kubenswrapper[4769]: I0131 16:35:01.494101 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-kkrd9"] Jan 31 16:35:01 crc kubenswrapper[4769]: W0131 16:35:01.500121 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb8c1fd17_896d_4f5d_b8c5_378a95dceb35.slice/crio-f9cb2ac8cf25c8761a45422f9f97dc5a2975205410ba13de2b59dedde38b0e5f WatchSource:0}: Error finding container f9cb2ac8cf25c8761a45422f9f97dc5a2975205410ba13de2b59dedde38b0e5f: Status 404 returned error can't find the container with id f9cb2ac8cf25c8761a45422f9f97dc5a2975205410ba13de2b59dedde38b0e5f Jan 31 16:35:01 crc kubenswrapper[4769]: I0131 16:35:01.633372 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-scx7g"] Jan 31 16:35:01 crc kubenswrapper[4769]: W0131 16:35:01.690159 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda488f9b4_4988_4cc1_8085_1c4410a2aa7b.slice/crio-7df6744efc56a7597c034e60c84b0650a3da8f48e20090b11974143bddc6982c WatchSource:0}: Error finding container 7df6744efc56a7597c034e60c84b0650a3da8f48e20090b11974143bddc6982c: Status 404 returned error can't find the container with id 7df6744efc56a7597c034e60c84b0650a3da8f48e20090b11974143bddc6982c Jan 31 16:35:01 crc kubenswrapper[4769]: I0131 16:35:01.880699 4769 generic.go:334] "Generic (PLEG): container finished" podID="a488f9b4-4988-4cc1-8085-1c4410a2aa7b" containerID="558917886d25ef514a8b99155d0773cfc0803c07b956c7a495dcc6c5725193d2" exitCode=0 Jan 31 16:35:01 crc kubenswrapper[4769]: I0131 16:35:01.880777 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-scx7g" event={"ID":"a488f9b4-4988-4cc1-8085-1c4410a2aa7b","Type":"ContainerDied","Data":"558917886d25ef514a8b99155d0773cfc0803c07b956c7a495dcc6c5725193d2"} Jan 31 16:35:01 crc kubenswrapper[4769]: I0131 16:35:01.880807 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-scx7g" event={"ID":"a488f9b4-4988-4cc1-8085-1c4410a2aa7b","Type":"ContainerStarted","Data":"7df6744efc56a7597c034e60c84b0650a3da8f48e20090b11974143bddc6982c"} Jan 31 16:35:01 crc kubenswrapper[4769]: I0131 16:35:01.883751 4769 generic.go:334] "Generic (PLEG): container finished" podID="b8c1fd17-896d-4f5d-b8c5-378a95dceb35" containerID="a5bf639690abcbe3df4a1071cee2c9efd6b09e77dbaa1b5bf4248db29335a2a1" exitCode=0 Jan 31 16:35:01 crc kubenswrapper[4769]: I0131 16:35:01.884600 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kkrd9" event={"ID":"b8c1fd17-896d-4f5d-b8c5-378a95dceb35","Type":"ContainerDied","Data":"a5bf639690abcbe3df4a1071cee2c9efd6b09e77dbaa1b5bf4248db29335a2a1"} Jan 31 16:35:01 crc kubenswrapper[4769]: I0131 16:35:01.884623 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kkrd9" event={"ID":"b8c1fd17-896d-4f5d-b8c5-378a95dceb35","Type":"ContainerStarted","Data":"f9cb2ac8cf25c8761a45422f9f97dc5a2975205410ba13de2b59dedde38b0e5f"} Jan 31 16:35:02 crc kubenswrapper[4769]: I0131 16:35:02.892547 4769 generic.go:334] "Generic (PLEG): container finished" podID="b8c1fd17-896d-4f5d-b8c5-378a95dceb35" containerID="7416bd0641c8932c1c948cac0cfefca9a21d2febe2650bcf645c143cbe79c1c7" exitCode=0 Jan 31 
16:35:02 crc kubenswrapper[4769]: I0131 16:35:02.892614 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kkrd9" event={"ID":"b8c1fd17-896d-4f5d-b8c5-378a95dceb35","Type":"ContainerDied","Data":"7416bd0641c8932c1c948cac0cfefca9a21d2febe2650bcf645c143cbe79c1c7"} Jan 31 16:35:02 crc kubenswrapper[4769]: I0131 16:35:02.896749 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-scx7g" event={"ID":"a488f9b4-4988-4cc1-8085-1c4410a2aa7b","Type":"ContainerStarted","Data":"36614c8adfc3d8f613d7abaa44fffb2e812e0f4fa882d8981c00b9e558b67482"} Jan 31 16:35:03 crc kubenswrapper[4769]: I0131 16:35:03.128892 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-znn46"] Jan 31 16:35:03 crc kubenswrapper[4769]: I0131 16:35:03.129841 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-znn46" Jan 31 16:35:03 crc kubenswrapper[4769]: I0131 16:35:03.131807 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 31 16:35:03 crc kubenswrapper[4769]: I0131 16:35:03.140690 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-znn46"] Jan 31 16:35:03 crc kubenswrapper[4769]: I0131 16:35:03.319184 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/04bbbf10-31ac-42ee-9ea9-bb96cfa7fb99-catalog-content\") pod \"community-operators-znn46\" (UID: \"04bbbf10-31ac-42ee-9ea9-bb96cfa7fb99\") " pod="openshift-marketplace/community-operators-znn46" Jan 31 16:35:03 crc kubenswrapper[4769]: I0131 16:35:03.319246 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/04bbbf10-31ac-42ee-9ea9-bb96cfa7fb99-utilities\") pod \"community-operators-znn46\" (UID: \"04bbbf10-31ac-42ee-9ea9-bb96cfa7fb99\") " pod="openshift-marketplace/community-operators-znn46" Jan 31 16:35:03 crc kubenswrapper[4769]: I0131 16:35:03.319274 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8h589\" (UniqueName: \"kubernetes.io/projected/04bbbf10-31ac-42ee-9ea9-bb96cfa7fb99-kube-api-access-8h589\") pod \"community-operators-znn46\" (UID: \"04bbbf10-31ac-42ee-9ea9-bb96cfa7fb99\") " pod="openshift-marketplace/community-operators-znn46" Jan 31 16:35:03 crc kubenswrapper[4769]: I0131 16:35:03.335665 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-qskmw"] Jan 31 16:35:03 crc kubenswrapper[4769]: I0131 16:35:03.337272 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qskmw" Jan 31 16:35:03 crc kubenswrapper[4769]: I0131 16:35:03.339216 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 31 16:35:03 crc kubenswrapper[4769]: I0131 16:35:03.340838 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qskmw"] Jan 31 16:35:03 crc kubenswrapper[4769]: I0131 16:35:03.420523 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/04bbbf10-31ac-42ee-9ea9-bb96cfa7fb99-catalog-content\") pod \"community-operators-znn46\" (UID: \"04bbbf10-31ac-42ee-9ea9-bb96cfa7fb99\") " pod="openshift-marketplace/community-operators-znn46" Jan 31 16:35:03 crc kubenswrapper[4769]: I0131 16:35:03.420579 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/04bbbf10-31ac-42ee-9ea9-bb96cfa7fb99-utilities\") pod \"community-operators-znn46\" (UID: \"04bbbf10-31ac-42ee-9ea9-bb96cfa7fb99\") " pod="openshift-marketplace/community-operators-znn46" Jan 31 16:35:03 crc kubenswrapper[4769]: I0131 16:35:03.420616 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2b46f121-f349-493b-9835-6ee6b3bc3ec6-catalog-content\") pod \"certified-operators-qskmw\" (UID: \"2b46f121-f349-493b-9835-6ee6b3bc3ec6\") " pod="openshift-marketplace/certified-operators-qskmw" Jan 31 16:35:03 crc kubenswrapper[4769]: I0131 16:35:03.420641 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8h589\" (UniqueName: \"kubernetes.io/projected/04bbbf10-31ac-42ee-9ea9-bb96cfa7fb99-kube-api-access-8h589\") pod \"community-operators-znn46\" (UID: \"04bbbf10-31ac-42ee-9ea9-bb96cfa7fb99\") " pod="openshift-marketplace/community-operators-znn46" Jan 31 16:35:03 crc kubenswrapper[4769]: I0131 16:35:03.420660 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2b46f121-f349-493b-9835-6ee6b3bc3ec6-utilities\") pod \"certified-operators-qskmw\" (UID: \"2b46f121-f349-493b-9835-6ee6b3bc3ec6\") " pod="openshift-marketplace/certified-operators-qskmw" Jan 31 16:35:03 crc kubenswrapper[4769]: I0131 16:35:03.420681 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p6gcf\" (UniqueName: \"kubernetes.io/projected/2b46f121-f349-493b-9835-6ee6b3bc3ec6-kube-api-access-p6gcf\") pod \"certified-operators-qskmw\" (UID: \"2b46f121-f349-493b-9835-6ee6b3bc3ec6\") " pod="openshift-marketplace/certified-operators-qskmw" Jan 31 16:35:03 crc kubenswrapper[4769]: I0131 16:35:03.420938 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/04bbbf10-31ac-42ee-9ea9-bb96cfa7fb99-catalog-content\") pod \"community-operators-znn46\" (UID: \"04bbbf10-31ac-42ee-9ea9-bb96cfa7fb99\") " pod="openshift-marketplace/community-operators-znn46" Jan 31 16:35:03 crc kubenswrapper[4769]: I0131 16:35:03.421036 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/04bbbf10-31ac-42ee-9ea9-bb96cfa7fb99-utilities\") pod \"community-operators-znn46\" (UID: 
\"04bbbf10-31ac-42ee-9ea9-bb96cfa7fb99\") " pod="openshift-marketplace/community-operators-znn46" Jan 31 16:35:03 crc kubenswrapper[4769]: I0131 16:35:03.447382 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8h589\" (UniqueName: \"kubernetes.io/projected/04bbbf10-31ac-42ee-9ea9-bb96cfa7fb99-kube-api-access-8h589\") pod \"community-operators-znn46\" (UID: \"04bbbf10-31ac-42ee-9ea9-bb96cfa7fb99\") " pod="openshift-marketplace/community-operators-znn46" Jan 31 16:35:03 crc kubenswrapper[4769]: I0131 16:35:03.461285 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-znn46" Jan 31 16:35:03 crc kubenswrapper[4769]: I0131 16:35:03.522915 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2b46f121-f349-493b-9835-6ee6b3bc3ec6-catalog-content\") pod \"certified-operators-qskmw\" (UID: \"2b46f121-f349-493b-9835-6ee6b3bc3ec6\") " pod="openshift-marketplace/certified-operators-qskmw" Jan 31 16:35:03 crc kubenswrapper[4769]: I0131 16:35:03.522997 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2b46f121-f349-493b-9835-6ee6b3bc3ec6-utilities\") pod \"certified-operators-qskmw\" (UID: \"2b46f121-f349-493b-9835-6ee6b3bc3ec6\") " pod="openshift-marketplace/certified-operators-qskmw" Jan 31 16:35:03 crc kubenswrapper[4769]: I0131 16:35:03.523038 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p6gcf\" (UniqueName: \"kubernetes.io/projected/2b46f121-f349-493b-9835-6ee6b3bc3ec6-kube-api-access-p6gcf\") pod \"certified-operators-qskmw\" (UID: \"2b46f121-f349-493b-9835-6ee6b3bc3ec6\") " pod="openshift-marketplace/certified-operators-qskmw" Jan 31 16:35:03 crc kubenswrapper[4769]: I0131 16:35:03.523790 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2b46f121-f349-493b-9835-6ee6b3bc3ec6-catalog-content\") pod \"certified-operators-qskmw\" (UID: \"2b46f121-f349-493b-9835-6ee6b3bc3ec6\") " pod="openshift-marketplace/certified-operators-qskmw" Jan 31 16:35:03 crc kubenswrapper[4769]: I0131 16:35:03.523834 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2b46f121-f349-493b-9835-6ee6b3bc3ec6-utilities\") pod \"certified-operators-qskmw\" (UID: \"2b46f121-f349-493b-9835-6ee6b3bc3ec6\") " pod="openshift-marketplace/certified-operators-qskmw" Jan 31 16:35:03 crc kubenswrapper[4769]: I0131 16:35:03.542270 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p6gcf\" (UniqueName: \"kubernetes.io/projected/2b46f121-f349-493b-9835-6ee6b3bc3ec6-kube-api-access-p6gcf\") pod \"certified-operators-qskmw\" (UID: \"2b46f121-f349-493b-9835-6ee6b3bc3ec6\") " pod="openshift-marketplace/certified-operators-qskmw" Jan 31 16:35:03 crc kubenswrapper[4769]: I0131 16:35:03.691062 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qskmw" Jan 31 16:35:03 crc kubenswrapper[4769]: I0131 16:35:03.891488 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-znn46"] Jan 31 16:35:03 crc kubenswrapper[4769]: W0131 16:35:03.897742 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod04bbbf10_31ac_42ee_9ea9_bb96cfa7fb99.slice/crio-286754fb98bf8f19d6376295dfd3c070c5b602f8f3910502d1a0eb6e786464fc WatchSource:0}: Error finding container 286754fb98bf8f19d6376295dfd3c070c5b602f8f3910502d1a0eb6e786464fc: Status 404 returned error can't find the container with id 286754fb98bf8f19d6376295dfd3c070c5b602f8f3910502d1a0eb6e786464fc Jan 31 16:35:03 crc kubenswrapper[4769]: I0131 16:35:03.915200 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kkrd9" event={"ID":"b8c1fd17-896d-4f5d-b8c5-378a95dceb35","Type":"ContainerStarted","Data":"58c464eeaba1df7da9a6fd0775b8bbd2d6ee9357b90ddcaa2b2b729968216867"} Jan 31 16:35:03 crc kubenswrapper[4769]: I0131 16:35:03.922206 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-scx7g" event={"ID":"a488f9b4-4988-4cc1-8085-1c4410a2aa7b","Type":"ContainerDied","Data":"36614c8adfc3d8f613d7abaa44fffb2e812e0f4fa882d8981c00b9e558b67482"} Jan 31 16:35:03 crc kubenswrapper[4769]: I0131 16:35:03.922197 4769 generic.go:334] "Generic (PLEG): container finished" podID="a488f9b4-4988-4cc1-8085-1c4410a2aa7b" containerID="36614c8adfc3d8f613d7abaa44fffb2e812e0f4fa882d8981c00b9e558b67482" exitCode=0 Jan 31 16:35:03 crc kubenswrapper[4769]: I0131 16:35:03.939440 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-kkrd9" podStartSLOduration=2.45500463 podStartE2EDuration="3.939410459s" podCreationTimestamp="2026-01-31 16:35:00 +0000 UTC" firstStartedPulling="2026-01-31 16:35:01.886405575 +0000 UTC m=+349.960574244" lastFinishedPulling="2026-01-31 16:35:03.370811404 +0000 UTC m=+351.444980073" observedRunningTime="2026-01-31 16:35:03.932851319 +0000 UTC m=+352.007019988" watchObservedRunningTime="2026-01-31 16:35:03.939410459 +0000 UTC m=+352.013579128" Jan 31 16:35:04 crc kubenswrapper[4769]: I0131 16:35:04.071004 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qskmw"] Jan 31 16:35:04 crc kubenswrapper[4769]: I0131 16:35:04.929995 4769 generic.go:334] "Generic (PLEG): container finished" podID="04bbbf10-31ac-42ee-9ea9-bb96cfa7fb99" containerID="bf8064784b5419c3452c4a7e74c1b5ee3735c979f6894694f78fc63dd2391118" exitCode=0 Jan 31 16:35:04 crc kubenswrapper[4769]: I0131 16:35:04.930068 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-znn46" event={"ID":"04bbbf10-31ac-42ee-9ea9-bb96cfa7fb99","Type":"ContainerDied","Data":"bf8064784b5419c3452c4a7e74c1b5ee3735c979f6894694f78fc63dd2391118"} Jan 31 16:35:04 crc kubenswrapper[4769]: I0131 16:35:04.930548 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-znn46" event={"ID":"04bbbf10-31ac-42ee-9ea9-bb96cfa7fb99","Type":"ContainerStarted","Data":"286754fb98bf8f19d6376295dfd3c070c5b602f8f3910502d1a0eb6e786464fc"} Jan 31 16:35:04 crc kubenswrapper[4769]: I0131 16:35:04.933488 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-scx7g" event={"ID":"a488f9b4-4988-4cc1-8085-1c4410a2aa7b","Type":"ContainerStarted","Data":"65f3d9ae93461e4cf5a76638654542f0322f92f1792e3ec325c2afe85e896dd0"} Jan 31 16:35:04 crc kubenswrapper[4769]: I0131 16:35:04.942388 4769 generic.go:334] "Generic (PLEG): container finished" podID="2b46f121-f349-493b-9835-6ee6b3bc3ec6" containerID="6c3d1428ac715b68a4f367ee7c208f8c32f0a3752a2722ac3c0b365784d30463" exitCode=0 Jan 31 16:35:04 crc kubenswrapper[4769]: I0131 16:35:04.942435 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qskmw" event={"ID":"2b46f121-f349-493b-9835-6ee6b3bc3ec6","Type":"ContainerDied","Data":"6c3d1428ac715b68a4f367ee7c208f8c32f0a3752a2722ac3c0b365784d30463"} Jan 31 16:35:04 crc kubenswrapper[4769]: I0131 16:35:04.942477 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qskmw" event={"ID":"2b46f121-f349-493b-9835-6ee6b3bc3ec6","Type":"ContainerStarted","Data":"eafc68c96ae163eee4c93e871b1686d4ffea897b6b7ee1c0a72929c06faa3169"} Jan 31 16:35:04 crc kubenswrapper[4769]: I0131 16:35:04.985346 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-scx7g" podStartSLOduration=2.417682034 podStartE2EDuration="4.98532444s" podCreationTimestamp="2026-01-31 16:35:00 +0000 UTC" firstStartedPulling="2026-01-31 16:35:01.881973623 +0000 UTC m=+349.956142292" lastFinishedPulling="2026-01-31 16:35:04.449616029 +0000 UTC m=+352.523784698" observedRunningTime="2026-01-31 16:35:04.982736599 +0000 UTC m=+353.056905268" watchObservedRunningTime="2026-01-31 16:35:04.98532444 +0000 UTC m=+353.059493119" Jan 31 16:35:05 crc kubenswrapper[4769]: I0131 16:35:05.950717 4769 generic.go:334] "Generic (PLEG): container finished" podID="04bbbf10-31ac-42ee-9ea9-bb96cfa7fb99" containerID="ac6aa4f0a83d4e37459e5784273236aee21236740948fdfc850ccd0b033274ac" exitCode=0 Jan 31 16:35:05 crc kubenswrapper[4769]: I0131 16:35:05.951036 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-znn46" event={"ID":"04bbbf10-31ac-42ee-9ea9-bb96cfa7fb99","Type":"ContainerDied","Data":"ac6aa4f0a83d4e37459e5784273236aee21236740948fdfc850ccd0b033274ac"} Jan 31 16:35:05 crc kubenswrapper[4769]: I0131 16:35:05.954317 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qskmw" event={"ID":"2b46f121-f349-493b-9835-6ee6b3bc3ec6","Type":"ContainerStarted","Data":"6485e4dd858d178df28b18f406e4e725061bd667a162a66cb0d78074885a010f"} Jan 31 16:35:06 crc kubenswrapper[4769]: I0131 16:35:06.649892 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-74dc647949-ccv4m"] Jan 31 16:35:06 crc kubenswrapper[4769]: I0131 16:35:06.650125 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-74dc647949-ccv4m" podUID="de542b67-94ed-458b-8665-8d4f03f7a0e8" containerName="controller-manager" containerID="cri-o://c12619a9560dfc578a9c5d642a68711e0934897ee665939e718b68d4a437a039" gracePeriod=30 Jan 31 16:35:06 crc kubenswrapper[4769]: I0131 16:35:06.963644 4769 generic.go:334] "Generic (PLEG): container finished" podID="de542b67-94ed-458b-8665-8d4f03f7a0e8" containerID="c12619a9560dfc578a9c5d642a68711e0934897ee665939e718b68d4a437a039" exitCode=0 Jan 31 16:35:06 crc kubenswrapper[4769]: I0131 16:35:06.963714 4769 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-74dc647949-ccv4m" event={"ID":"de542b67-94ed-458b-8665-8d4f03f7a0e8","Type":"ContainerDied","Data":"c12619a9560dfc578a9c5d642a68711e0934897ee665939e718b68d4a437a039"} Jan 31 16:35:06 crc kubenswrapper[4769]: I0131 16:35:06.971231 4769 generic.go:334] "Generic (PLEG): container finished" podID="2b46f121-f349-493b-9835-6ee6b3bc3ec6" containerID="6485e4dd858d178df28b18f406e4e725061bd667a162a66cb0d78074885a010f" exitCode=0 Jan 31 16:35:06 crc kubenswrapper[4769]: I0131 16:35:06.971546 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qskmw" event={"ID":"2b46f121-f349-493b-9835-6ee6b3bc3ec6","Type":"ContainerDied","Data":"6485e4dd858d178df28b18f406e4e725061bd667a162a66cb0d78074885a010f"} Jan 31 16:35:06 crc kubenswrapper[4769]: I0131 16:35:06.980967 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-znn46" event={"ID":"04bbbf10-31ac-42ee-9ea9-bb96cfa7fb99","Type":"ContainerStarted","Data":"11c761a6b26da776fb3ae1c258665fb7d0c1be1c26f06288b06a91e7b548c578"} Jan 31 16:35:07 crc kubenswrapper[4769]: I0131 16:35:07.019581 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-znn46" podStartSLOduration=2.625137663 podStartE2EDuration="4.019564577s" podCreationTimestamp="2026-01-31 16:35:03 +0000 UTC" firstStartedPulling="2026-01-31 16:35:04.931921771 +0000 UTC m=+353.006090440" lastFinishedPulling="2026-01-31 16:35:06.326348685 +0000 UTC m=+354.400517354" observedRunningTime="2026-01-31 16:35:07.018173829 +0000 UTC m=+355.092342488" watchObservedRunningTime="2026-01-31 16:35:07.019564577 +0000 UTC m=+355.093733256" Jan 31 16:35:07 crc kubenswrapper[4769]: I0131 16:35:07.194995 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-74dc647949-ccv4m" Jan 31 16:35:07 crc kubenswrapper[4769]: I0131 16:35:07.373149 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/de542b67-94ed-458b-8665-8d4f03f7a0e8-proxy-ca-bundles\") pod \"de542b67-94ed-458b-8665-8d4f03f7a0e8\" (UID: \"de542b67-94ed-458b-8665-8d4f03f7a0e8\") " Jan 31 16:35:07 crc kubenswrapper[4769]: I0131 16:35:07.373886 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/de542b67-94ed-458b-8665-8d4f03f7a0e8-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "de542b67-94ed-458b-8665-8d4f03f7a0e8" (UID: "de542b67-94ed-458b-8665-8d4f03f7a0e8"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:35:07 crc kubenswrapper[4769]: I0131 16:35:07.373969 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/de542b67-94ed-458b-8665-8d4f03f7a0e8-client-ca\") pod \"de542b67-94ed-458b-8665-8d4f03f7a0e8\" (UID: \"de542b67-94ed-458b-8665-8d4f03f7a0e8\") " Jan 31 16:35:07 crc kubenswrapper[4769]: I0131 16:35:07.374235 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/de542b67-94ed-458b-8665-8d4f03f7a0e8-client-ca" (OuterVolumeSpecName: "client-ca") pod "de542b67-94ed-458b-8665-8d4f03f7a0e8" (UID: "de542b67-94ed-458b-8665-8d4f03f7a0e8"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:35:07 crc kubenswrapper[4769]: I0131 16:35:07.374622 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/de542b67-94ed-458b-8665-8d4f03f7a0e8-serving-cert\") pod \"de542b67-94ed-458b-8665-8d4f03f7a0e8\" (UID: \"de542b67-94ed-458b-8665-8d4f03f7a0e8\") " Jan 31 16:35:07 crc kubenswrapper[4769]: I0131 16:35:07.374654 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/de542b67-94ed-458b-8665-8d4f03f7a0e8-config\") pod \"de542b67-94ed-458b-8665-8d4f03f7a0e8\" (UID: \"de542b67-94ed-458b-8665-8d4f03f7a0e8\") " Jan 31 16:35:07 crc kubenswrapper[4769]: I0131 16:35:07.375323 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s2rh2\" (UniqueName: \"kubernetes.io/projected/de542b67-94ed-458b-8665-8d4f03f7a0e8-kube-api-access-s2rh2\") pod \"de542b67-94ed-458b-8665-8d4f03f7a0e8\" (UID: \"de542b67-94ed-458b-8665-8d4f03f7a0e8\") " Jan 31 16:35:07 crc kubenswrapper[4769]: I0131 16:35:07.375254 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/de542b67-94ed-458b-8665-8d4f03f7a0e8-config" (OuterVolumeSpecName: "config") pod "de542b67-94ed-458b-8665-8d4f03f7a0e8" (UID: "de542b67-94ed-458b-8665-8d4f03f7a0e8"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:35:07 crc kubenswrapper[4769]: I0131 16:35:07.375546 4769 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/de542b67-94ed-458b-8665-8d4f03f7a0e8-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 31 16:35:07 crc kubenswrapper[4769]: I0131 16:35:07.375567 4769 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/de542b67-94ed-458b-8665-8d4f03f7a0e8-client-ca\") on node \"crc\" DevicePath \"\"" Jan 31 16:35:07 crc kubenswrapper[4769]: I0131 16:35:07.375578 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/de542b67-94ed-458b-8665-8d4f03f7a0e8-config\") on node \"crc\" DevicePath \"\"" Jan 31 16:35:07 crc kubenswrapper[4769]: I0131 16:35:07.379695 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de542b67-94ed-458b-8665-8d4f03f7a0e8-kube-api-access-s2rh2" (OuterVolumeSpecName: "kube-api-access-s2rh2") pod "de542b67-94ed-458b-8665-8d4f03f7a0e8" (UID: "de542b67-94ed-458b-8665-8d4f03f7a0e8"). InnerVolumeSpecName "kube-api-access-s2rh2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:35:07 crc kubenswrapper[4769]: I0131 16:35:07.387595 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de542b67-94ed-458b-8665-8d4f03f7a0e8-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "de542b67-94ed-458b-8665-8d4f03f7a0e8" (UID: "de542b67-94ed-458b-8665-8d4f03f7a0e8"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:35:07 crc kubenswrapper[4769]: I0131 16:35:07.476601 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s2rh2\" (UniqueName: \"kubernetes.io/projected/de542b67-94ed-458b-8665-8d4f03f7a0e8-kube-api-access-s2rh2\") on node \"crc\" DevicePath \"\"" Jan 31 16:35:07 crc kubenswrapper[4769]: I0131 16:35:07.476660 4769 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/de542b67-94ed-458b-8665-8d4f03f7a0e8-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 31 16:35:07 crc kubenswrapper[4769]: I0131 16:35:07.810624 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-84b485d7fd-8mhx2"] Jan 31 16:35:07 crc kubenswrapper[4769]: E0131 16:35:07.811109 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de542b67-94ed-458b-8665-8d4f03f7a0e8" containerName="controller-manager" Jan 31 16:35:07 crc kubenswrapper[4769]: I0131 16:35:07.811122 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="de542b67-94ed-458b-8665-8d4f03f7a0e8" containerName="controller-manager" Jan 31 16:35:07 crc kubenswrapper[4769]: I0131 16:35:07.811212 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="de542b67-94ed-458b-8665-8d4f03f7a0e8" containerName="controller-manager" Jan 31 16:35:07 crc kubenswrapper[4769]: I0131 16:35:07.811645 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-84b485d7fd-8mhx2" Jan 31 16:35:07 crc kubenswrapper[4769]: I0131 16:35:07.819407 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-84b485d7fd-8mhx2"] Jan 31 16:35:07 crc kubenswrapper[4769]: I0131 16:35:07.989055 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-74dc647949-ccv4m" Jan 31 16:35:07 crc kubenswrapper[4769]: I0131 16:35:07.991113 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-74dc647949-ccv4m" event={"ID":"de542b67-94ed-458b-8665-8d4f03f7a0e8","Type":"ContainerDied","Data":"70c973f03e539760d5cdf75b150f9043c65b5dedce983f3465617d118e0f9044"} Jan 31 16:35:07 crc kubenswrapper[4769]: I0131 16:35:07.991156 4769 scope.go:117] "RemoveContainer" containerID="c12619a9560dfc578a9c5d642a68711e0934897ee665939e718b68d4a437a039" Jan 31 16:35:08 crc kubenswrapper[4769]: I0131 16:35:08.005678 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gmzjr\" (UniqueName: \"kubernetes.io/projected/1b1096cc-4991-472d-94be-755d9bb406ba-kube-api-access-gmzjr\") pod \"controller-manager-84b485d7fd-8mhx2\" (UID: \"1b1096cc-4991-472d-94be-755d9bb406ba\") " pod="openshift-controller-manager/controller-manager-84b485d7fd-8mhx2" Jan 31 16:35:08 crc kubenswrapper[4769]: I0131 16:35:08.005760 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1b1096cc-4991-472d-94be-755d9bb406ba-client-ca\") pod \"controller-manager-84b485d7fd-8mhx2\" (UID: \"1b1096cc-4991-472d-94be-755d9bb406ba\") " pod="openshift-controller-manager/controller-manager-84b485d7fd-8mhx2" Jan 31 16:35:08 crc kubenswrapper[4769]: I0131 16:35:08.005809 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1b1096cc-4991-472d-94be-755d9bb406ba-serving-cert\") pod \"controller-manager-84b485d7fd-8mhx2\" (UID: \"1b1096cc-4991-472d-94be-755d9bb406ba\") " pod="openshift-controller-manager/controller-manager-84b485d7fd-8mhx2" Jan 31 16:35:08 crc kubenswrapper[4769]: I0131 16:35:08.005854 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b1096cc-4991-472d-94be-755d9bb406ba-config\") pod \"controller-manager-84b485d7fd-8mhx2\" (UID: \"1b1096cc-4991-472d-94be-755d9bb406ba\") " pod="openshift-controller-manager/controller-manager-84b485d7fd-8mhx2" Jan 31 16:35:08 crc kubenswrapper[4769]: I0131 16:35:08.005877 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/1b1096cc-4991-472d-94be-755d9bb406ba-proxy-ca-bundles\") pod \"controller-manager-84b485d7fd-8mhx2\" (UID: \"1b1096cc-4991-472d-94be-755d9bb406ba\") " pod="openshift-controller-manager/controller-manager-84b485d7fd-8mhx2" Jan 31 16:35:08 crc kubenswrapper[4769]: I0131 16:35:08.107168 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1b1096cc-4991-472d-94be-755d9bb406ba-serving-cert\") pod \"controller-manager-84b485d7fd-8mhx2\" (UID: \"1b1096cc-4991-472d-94be-755d9bb406ba\") " pod="openshift-controller-manager/controller-manager-84b485d7fd-8mhx2" Jan 31 16:35:08 crc kubenswrapper[4769]: I0131 16:35:08.107245 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b1096cc-4991-472d-94be-755d9bb406ba-config\") pod \"controller-manager-84b485d7fd-8mhx2\" (UID: \"1b1096cc-4991-472d-94be-755d9bb406ba\") " 
pod="openshift-controller-manager/controller-manager-84b485d7fd-8mhx2" Jan 31 16:35:08 crc kubenswrapper[4769]: I0131 16:35:08.107269 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/1b1096cc-4991-472d-94be-755d9bb406ba-proxy-ca-bundles\") pod \"controller-manager-84b485d7fd-8mhx2\" (UID: \"1b1096cc-4991-472d-94be-755d9bb406ba\") " pod="openshift-controller-manager/controller-manager-84b485d7fd-8mhx2" Jan 31 16:35:08 crc kubenswrapper[4769]: I0131 16:35:08.107292 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gmzjr\" (UniqueName: \"kubernetes.io/projected/1b1096cc-4991-472d-94be-755d9bb406ba-kube-api-access-gmzjr\") pod \"controller-manager-84b485d7fd-8mhx2\" (UID: \"1b1096cc-4991-472d-94be-755d9bb406ba\") " pod="openshift-controller-manager/controller-manager-84b485d7fd-8mhx2" Jan 31 16:35:08 crc kubenswrapper[4769]: I0131 16:35:08.107333 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1b1096cc-4991-472d-94be-755d9bb406ba-client-ca\") pod \"controller-manager-84b485d7fd-8mhx2\" (UID: \"1b1096cc-4991-472d-94be-755d9bb406ba\") " pod="openshift-controller-manager/controller-manager-84b485d7fd-8mhx2" Jan 31 16:35:08 crc kubenswrapper[4769]: I0131 16:35:08.108733 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1b1096cc-4991-472d-94be-755d9bb406ba-client-ca\") pod \"controller-manager-84b485d7fd-8mhx2\" (UID: \"1b1096cc-4991-472d-94be-755d9bb406ba\") " pod="openshift-controller-manager/controller-manager-84b485d7fd-8mhx2" Jan 31 16:35:08 crc kubenswrapper[4769]: I0131 16:35:08.109023 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/1b1096cc-4991-472d-94be-755d9bb406ba-proxy-ca-bundles\") pod \"controller-manager-84b485d7fd-8mhx2\" (UID: \"1b1096cc-4991-472d-94be-755d9bb406ba\") " pod="openshift-controller-manager/controller-manager-84b485d7fd-8mhx2" Jan 31 16:35:08 crc kubenswrapper[4769]: I0131 16:35:08.109063 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b1096cc-4991-472d-94be-755d9bb406ba-config\") pod \"controller-manager-84b485d7fd-8mhx2\" (UID: \"1b1096cc-4991-472d-94be-755d9bb406ba\") " pod="openshift-controller-manager/controller-manager-84b485d7fd-8mhx2" Jan 31 16:35:08 crc kubenswrapper[4769]: I0131 16:35:08.109806 4769 patch_prober.go:28] interesting pod/controller-manager-74dc647949-ccv4m container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.58:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 31 16:35:08 crc kubenswrapper[4769]: I0131 16:35:08.109851 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-74dc647949-ccv4m" podUID="de542b67-94ed-458b-8665-8d4f03f7a0e8" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.58:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 31 16:35:08 crc kubenswrapper[4769]: I0131 16:35:08.113059 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1b1096cc-4991-472d-94be-755d9bb406ba-serving-cert\") pod \"controller-manager-84b485d7fd-8mhx2\" (UID: \"1b1096cc-4991-472d-94be-755d9bb406ba\") " pod="openshift-controller-manager/controller-manager-84b485d7fd-8mhx2" Jan 31 16:35:08 crc kubenswrapper[4769]: I0131 16:35:08.127349 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gmzjr\" (UniqueName: \"kubernetes.io/projected/1b1096cc-4991-472d-94be-755d9bb406ba-kube-api-access-gmzjr\") pod \"controller-manager-84b485d7fd-8mhx2\" (UID: \"1b1096cc-4991-472d-94be-755d9bb406ba\") " pod="openshift-controller-manager/controller-manager-84b485d7fd-8mhx2" Jan 31 16:35:08 crc kubenswrapper[4769]: I0131 16:35:08.769630 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-84b485d7fd-8mhx2" Jan 31 16:35:08 crc kubenswrapper[4769]: I0131 16:35:08.995997 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qskmw" event={"ID":"2b46f121-f349-493b-9835-6ee6b3bc3ec6","Type":"ContainerStarted","Data":"f11058d986c3ede8219f8c18099e409300848be8c696972c684001ad6679a6f9"} Jan 31 16:35:09 crc kubenswrapper[4769]: I0131 16:35:09.015417 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-qskmw" podStartSLOduration=3.19995271 podStartE2EDuration="6.01540216s" podCreationTimestamp="2026-01-31 16:35:03 +0000 UTC" firstStartedPulling="2026-01-31 16:35:04.943687295 +0000 UTC m=+353.017855964" lastFinishedPulling="2026-01-31 16:35:07.759136745 +0000 UTC m=+355.833305414" observedRunningTime="2026-01-31 16:35:09.014598167 +0000 UTC m=+357.088766836" watchObservedRunningTime="2026-01-31 16:35:09.01540216 +0000 UTC m=+357.089570829" Jan 31 16:35:09 crc kubenswrapper[4769]: I0131 16:35:09.178834 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-84b485d7fd-8mhx2"] Jan 31 16:35:09 crc kubenswrapper[4769]: W0131 16:35:09.185110 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1b1096cc_4991_472d_94be_755d9bb406ba.slice/crio-6f552709bb7d71d78150cb89dc7e0f003fa0168b202313d1867fe2b770109dd6 WatchSource:0}: Error finding container 6f552709bb7d71d78150cb89dc7e0f003fa0168b202313d1867fe2b770109dd6: Status 404 returned error can't find the container with id 6f552709bb7d71d78150cb89dc7e0f003fa0168b202313d1867fe2b770109dd6 Jan 31 16:35:10 crc kubenswrapper[4769]: I0131 16:35:10.005620 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-84b485d7fd-8mhx2" event={"ID":"1b1096cc-4991-472d-94be-755d9bb406ba","Type":"ContainerStarted","Data":"0f3d24eef8a2bee52ff65ee3dfdba142b59cfab3f9c010a8827ef8b39cd65732"} Jan 31 16:35:10 crc kubenswrapper[4769]: I0131 16:35:10.005686 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-84b485d7fd-8mhx2" event={"ID":"1b1096cc-4991-472d-94be-755d9bb406ba","Type":"ContainerStarted","Data":"6f552709bb7d71d78150cb89dc7e0f003fa0168b202313d1867fe2b770109dd6"} Jan 31 16:35:10 crc kubenswrapper[4769]: I0131 16:35:10.006042 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-84b485d7fd-8mhx2" Jan 31 16:35:10 crc kubenswrapper[4769]: I0131 16:35:10.011658 4769 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-84b485d7fd-8mhx2" Jan 31 16:35:10 crc kubenswrapper[4769]: I0131 16:35:10.024238 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-84b485d7fd-8mhx2" podStartSLOduration=4.02421107 podStartE2EDuration="4.02421107s" podCreationTimestamp="2026-01-31 16:35:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:35:10.020868508 +0000 UTC m=+358.095037177" watchObservedRunningTime="2026-01-31 16:35:10.02421107 +0000 UTC m=+358.098379749" Jan 31 16:35:11 crc kubenswrapper[4769]: I0131 16:35:11.103634 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-kkrd9" Jan 31 16:35:11 crc kubenswrapper[4769]: I0131 16:35:11.104261 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-kkrd9" Jan 31 16:35:11 crc kubenswrapper[4769]: I0131 16:35:11.173561 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-kkrd9" Jan 31 16:35:11 crc kubenswrapper[4769]: I0131 16:35:11.256258 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-scx7g" Jan 31 16:35:11 crc kubenswrapper[4769]: I0131 16:35:11.256319 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-scx7g" Jan 31 16:35:11 crc kubenswrapper[4769]: I0131 16:35:11.300087 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-scx7g" Jan 31 16:35:12 crc kubenswrapper[4769]: I0131 16:35:12.057797 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-kkrd9" Jan 31 16:35:12 crc kubenswrapper[4769]: I0131 16:35:12.073926 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-scx7g" Jan 31 16:35:13 crc kubenswrapper[4769]: I0131 16:35:13.462424 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-znn46" Jan 31 16:35:13 crc kubenswrapper[4769]: I0131 16:35:13.462892 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-znn46" Jan 31 16:35:13 crc kubenswrapper[4769]: I0131 16:35:13.521358 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-znn46" Jan 31 16:35:13 crc kubenswrapper[4769]: I0131 16:35:13.692069 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-qskmw" Jan 31 16:35:13 crc kubenswrapper[4769]: I0131 16:35:13.692128 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-qskmw" Jan 31 16:35:13 crc kubenswrapper[4769]: I0131 16:35:13.724967 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-qskmw" Jan 31 16:35:14 crc kubenswrapper[4769]: I0131 16:35:14.391175 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-marketplace/certified-operators-qskmw" Jan 31 16:35:14 crc kubenswrapper[4769]: I0131 16:35:14.464024 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-znn46" Jan 31 16:35:20 crc kubenswrapper[4769]: I0131 16:35:20.682390 4769 patch_prober.go:28] interesting pod/machine-config-daemon-4bqbm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 31 16:35:20 crc kubenswrapper[4769]: I0131 16:35:20.682463 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 31 16:35:38 crc kubenswrapper[4769]: I0131 16:35:38.749393 4769 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","burstable","podde542b67-94ed-458b-8665-8d4f03f7a0e8"] err="unable to destroy cgroup paths for cgroup [kubepods burstable podde542b67-94ed-458b-8665-8d4f03f7a0e8] : Timed out while waiting for systemd to remove kubepods-burstable-podde542b67_94ed_458b_8665_8d4f03f7a0e8.slice" Jan 31 16:35:38 crc kubenswrapper[4769]: E0131 16:35:38.749963 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to delete cgroup paths for [kubepods burstable podde542b67-94ed-458b-8665-8d4f03f7a0e8] : unable to destroy cgroup paths for cgroup [kubepods burstable podde542b67-94ed-458b-8665-8d4f03f7a0e8] : Timed out while waiting for systemd to remove kubepods-burstable-podde542b67_94ed_458b_8665_8d4f03f7a0e8.slice" pod="openshift-controller-manager/controller-manager-74dc647949-ccv4m" podUID="de542b67-94ed-458b-8665-8d4f03f7a0e8" Jan 31 16:35:39 crc kubenswrapper[4769]: I0131 16:35:39.195167 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-74dc647949-ccv4m" Jan 31 16:35:39 crc kubenswrapper[4769]: I0131 16:35:39.241830 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-74dc647949-ccv4m"] Jan 31 16:35:39 crc kubenswrapper[4769]: I0131 16:35:39.250222 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-74dc647949-ccv4m"] Jan 31 16:35:40 crc kubenswrapper[4769]: I0131 16:35:40.717698 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="de542b67-94ed-458b-8665-8d4f03f7a0e8" path="/var/lib/kubelet/pods/de542b67-94ed-458b-8665-8d4f03f7a0e8/volumes" Jan 31 16:35:50 crc kubenswrapper[4769]: I0131 16:35:50.682274 4769 patch_prober.go:28] interesting pod/machine-config-daemon-4bqbm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 31 16:35:50 crc kubenswrapper[4769]: I0131 16:35:50.683078 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 31 16:36:20 crc kubenswrapper[4769]: I0131 16:36:20.681753 4769 patch_prober.go:28] interesting pod/machine-config-daemon-4bqbm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 31 16:36:20 crc kubenswrapper[4769]: I0131 16:36:20.683793 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 31 16:36:20 crc kubenswrapper[4769]: I0131 16:36:20.683926 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" Jan 31 16:36:20 crc kubenswrapper[4769]: I0131 16:36:20.684590 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a8bb3feeaa45e2f9b9bc6d64ceaa00a54b12973097c91f5714ebbca3e9dbe151"} pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 31 16:36:20 crc kubenswrapper[4769]: I0131 16:36:20.684764 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" containerID="cri-o://a8bb3feeaa45e2f9b9bc6d64ceaa00a54b12973097c91f5714ebbca3e9dbe151" gracePeriod=600 Jan 31 16:36:21 crc kubenswrapper[4769]: I0131 16:36:21.478897 4769 generic.go:334] "Generic (PLEG): container finished" podID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerID="a8bb3feeaa45e2f9b9bc6d64ceaa00a54b12973097c91f5714ebbca3e9dbe151" exitCode=0 Jan 31 16:36:21 crc kubenswrapper[4769]: 
I0131 16:36:21.478971 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" event={"ID":"1d352f75-43f7-4b8c-867e-cfb17bbbe011","Type":"ContainerDied","Data":"a8bb3feeaa45e2f9b9bc6d64ceaa00a54b12973097c91f5714ebbca3e9dbe151"} Jan 31 16:36:21 crc kubenswrapper[4769]: I0131 16:36:21.479376 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" event={"ID":"1d352f75-43f7-4b8c-867e-cfb17bbbe011","Type":"ContainerStarted","Data":"d3a0f00c1b4d89d80065217a88665f99018d91df8e0042a3f3c33726e97b1315"} Jan 31 16:36:21 crc kubenswrapper[4769]: I0131 16:36:21.479440 4769 scope.go:117] "RemoveContainer" containerID="f48035545fd929a672be1a83a941b13f4b352bdb858af6a412efec46dc7ac217" Jan 31 16:38:20 crc kubenswrapper[4769]: I0131 16:38:20.682542 4769 patch_prober.go:28] interesting pod/machine-config-daemon-4bqbm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 31 16:38:20 crc kubenswrapper[4769]: I0131 16:38:20.683299 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 31 16:38:50 crc kubenswrapper[4769]: I0131 16:38:50.682864 4769 patch_prober.go:28] interesting pod/machine-config-daemon-4bqbm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 31 16:38:50 crc kubenswrapper[4769]: I0131 16:38:50.683554 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 31 16:39:20 crc kubenswrapper[4769]: I0131 16:39:20.682075 4769 patch_prober.go:28] interesting pod/machine-config-daemon-4bqbm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 31 16:39:20 crc kubenswrapper[4769]: I0131 16:39:20.682671 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 31 16:39:20 crc kubenswrapper[4769]: I0131 16:39:20.682733 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" Jan 31 16:39:20 crc kubenswrapper[4769]: I0131 16:39:20.683395 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d3a0f00c1b4d89d80065217a88665f99018d91df8e0042a3f3c33726e97b1315"} 
pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 31 16:39:20 crc kubenswrapper[4769]: I0131 16:39:20.683488 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" containerID="cri-o://d3a0f00c1b4d89d80065217a88665f99018d91df8e0042a3f3c33726e97b1315" gracePeriod=600 Jan 31 16:39:21 crc kubenswrapper[4769]: I0131 16:39:21.782990 4769 generic.go:334] "Generic (PLEG): container finished" podID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerID="d3a0f00c1b4d89d80065217a88665f99018d91df8e0042a3f3c33726e97b1315" exitCode=0 Jan 31 16:39:21 crc kubenswrapper[4769]: I0131 16:39:21.783118 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" event={"ID":"1d352f75-43f7-4b8c-867e-cfb17bbbe011","Type":"ContainerDied","Data":"d3a0f00c1b4d89d80065217a88665f99018d91df8e0042a3f3c33726e97b1315"} Jan 31 16:39:21 crc kubenswrapper[4769]: I0131 16:39:21.783880 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" event={"ID":"1d352f75-43f7-4b8c-867e-cfb17bbbe011","Type":"ContainerStarted","Data":"468aac8c3c2e831dfe213619a8cbfe7284a5104d05804071dc210d52a0e5d3d0"} Jan 31 16:39:21 crc kubenswrapper[4769]: I0131 16:39:21.783915 4769 scope.go:117] "RemoveContainer" containerID="a8bb3feeaa45e2f9b9bc6d64ceaa00a54b12973097c91f5714ebbca3e9dbe151" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.439744 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-2r9tc"] Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.441066 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerName="ovn-controller" containerID="cri-o://e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67" gracePeriod=30 Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.441161 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerName="nbdb" containerID="cri-o://a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd" gracePeriod=30 Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.441230 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerName="northd" containerID="cri-o://81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e" gracePeriod=30 Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.441296 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8" gracePeriod=30 Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.441343 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerName="sbdb" 
containerID="cri-o://e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f" gracePeriod=30 Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.441358 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerName="kube-rbac-proxy-node" containerID="cri-o://7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3" gracePeriod=30 Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.441409 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerName="ovn-acl-logging" containerID="cri-o://ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6" gracePeriod=30 Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.513294 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerName="ovnkube-controller" containerID="cri-o://5a289c283ed37ae1109445a2935ece7ff8211c5453a1f152157da1236035f205" gracePeriod=30 Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.820952 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2r9tc_86f2019b-d6ca-4e73-9dac-52fe746489cb/ovnkube-controller/3.log" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.823455 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2r9tc_86f2019b-d6ca-4e73-9dac-52fe746489cb/ovn-acl-logging/0.log" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.824040 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2r9tc_86f2019b-d6ca-4e73-9dac-52fe746489cb/ovn-controller/0.log" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.824600 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.868630 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2r9tc_86f2019b-d6ca-4e73-9dac-52fe746489cb/ovnkube-controller/3.log" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.873922 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2r9tc_86f2019b-d6ca-4e73-9dac-52fe746489cb/ovn-acl-logging/0.log" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.874520 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2r9tc_86f2019b-d6ca-4e73-9dac-52fe746489cb/ovn-controller/0.log" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.874999 4769 generic.go:334] "Generic (PLEG): container finished" podID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerID="5a289c283ed37ae1109445a2935ece7ff8211c5453a1f152157da1236035f205" exitCode=0 Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875026 4769 generic.go:334] "Generic (PLEG): container finished" podID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerID="e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f" exitCode=0 Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875033 4769 generic.go:334] "Generic (PLEG): container finished" podID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerID="a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd" exitCode=0 Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875040 4769 generic.go:334] "Generic (PLEG): container finished" podID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerID="81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e" exitCode=0 Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875050 4769 generic.go:334] "Generic (PLEG): container finished" podID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerID="0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8" exitCode=0 Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875057 4769 generic.go:334] "Generic (PLEG): container finished" podID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerID="7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3" exitCode=0 Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875063 4769 generic.go:334] "Generic (PLEG): container finished" podID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerID="ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6" exitCode=143 Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875070 4769 generic.go:334] "Generic (PLEG): container finished" podID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerID="e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67" exitCode=143 Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875114 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" event={"ID":"86f2019b-d6ca-4e73-9dac-52fe746489cb","Type":"ContainerDied","Data":"5a289c283ed37ae1109445a2935ece7ff8211c5453a1f152157da1236035f205"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875126 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875139 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" event={"ID":"86f2019b-d6ca-4e73-9dac-52fe746489cb","Type":"ContainerDied","Data":"e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875150 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" event={"ID":"86f2019b-d6ca-4e73-9dac-52fe746489cb","Type":"ContainerDied","Data":"a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875160 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" event={"ID":"86f2019b-d6ca-4e73-9dac-52fe746489cb","Type":"ContainerDied","Data":"81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875169 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" event={"ID":"86f2019b-d6ca-4e73-9dac-52fe746489cb","Type":"ContainerDied","Data":"0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875178 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" event={"ID":"86f2019b-d6ca-4e73-9dac-52fe746489cb","Type":"ContainerDied","Data":"7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875188 4769 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a2f542bc61e702fd04e7f702af083e41309d9ba14e7edaf90b0e34a9b1ab7b53"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875200 4769 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875207 4769 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875214 4769 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875221 4769 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875227 4769 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875234 4769 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875240 4769 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875246 4769 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875253 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" event={"ID":"86f2019b-d6ca-4e73-9dac-52fe746489cb","Type":"ContainerDied","Data":"ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875262 4769 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5a289c283ed37ae1109445a2935ece7ff8211c5453a1f152157da1236035f205"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875280 4769 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a2f542bc61e702fd04e7f702af083e41309d9ba14e7edaf90b0e34a9b1ab7b53"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875286 4769 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875291 4769 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875296 4769 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875301 4769 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875306 4769 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875311 4769 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875316 4769 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875321 4769 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875328 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" event={"ID":"86f2019b-d6ca-4e73-9dac-52fe746489cb","Type":"ContainerDied","Data":"e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875337 4769 
pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5a289c283ed37ae1109445a2935ece7ff8211c5453a1f152157da1236035f205"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875343 4769 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a2f542bc61e702fd04e7f702af083e41309d9ba14e7edaf90b0e34a9b1ab7b53"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875348 4769 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875353 4769 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875358 4769 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875363 4769 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875368 4769 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875374 4769 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875379 4769 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875384 4769 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875391 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2r9tc" event={"ID":"86f2019b-d6ca-4e73-9dac-52fe746489cb","Type":"ContainerDied","Data":"3df7a03ee7c709da23684d8bc4251353a97795fa88b872fcfba9c869f0f93647"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875398 4769 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5a289c283ed37ae1109445a2935ece7ff8211c5453a1f152157da1236035f205"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875405 4769 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a2f542bc61e702fd04e7f702af083e41309d9ba14e7edaf90b0e34a9b1ab7b53"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875410 4769 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875415 4769 
pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875421 4769 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875426 4769 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875431 4769 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875436 4769 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875441 4769 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875446 4769 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.875458 4769 scope.go:117] "RemoveContainer" containerID="5a289c283ed37ae1109445a2935ece7ff8211c5453a1f152157da1236035f205" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.888564 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-g5kbw_4a7cfe09-9892-494d-a420-5d720afb3df3/kube-multus/2.log" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.889206 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-g5kbw_4a7cfe09-9892-494d-a420-5d720afb3df3/kube-multus/1.log" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.889241 4769 generic.go:334] "Generic (PLEG): container finished" podID="4a7cfe09-9892-494d-a420-5d720afb3df3" containerID="4d02b7b52d4a04cc3175863e1021a77566a5ea07c3c2035a027d8f00b49ec612" exitCode=2 Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.889273 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-g5kbw" event={"ID":"4a7cfe09-9892-494d-a420-5d720afb3df3","Type":"ContainerDied","Data":"4d02b7b52d4a04cc3175863e1021a77566a5ea07c3c2035a027d8f00b49ec612"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.889297 4769 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"eb4ceb742b812d9b282ba14b266b4a78550b5dc38d7637c07d1c95256799bc40"} Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.891089 4769 scope.go:117] "RemoveContainer" containerID="4d02b7b52d4a04cc3175863e1021a77566a5ea07c3c2035a027d8f00b49ec612" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.891949 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-g7kwz"] Jan 31 16:39:35 crc kubenswrapper[4769]: E0131 16:39:35.892365 4769 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerName="kube-rbac-proxy-ovn-metrics" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.892392 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerName="kube-rbac-proxy-ovn-metrics" Jan 31 16:39:35 crc kubenswrapper[4769]: E0131 16:39:35.892415 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerName="ovnkube-controller" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.892426 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerName="ovnkube-controller" Jan 31 16:39:35 crc kubenswrapper[4769]: E0131 16:39:35.892440 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerName="northd" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.892448 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerName="northd" Jan 31 16:39:35 crc kubenswrapper[4769]: E0131 16:39:35.892470 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerName="kube-rbac-proxy-node" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.892480 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerName="kube-rbac-proxy-node" Jan 31 16:39:35 crc kubenswrapper[4769]: E0131 16:39:35.892784 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerName="ovnkube-controller" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.892794 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerName="ovnkube-controller" Jan 31 16:39:35 crc kubenswrapper[4769]: E0131 16:39:35.892810 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerName="ovnkube-controller" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.892819 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerName="ovnkube-controller" Jan 31 16:39:35 crc kubenswrapper[4769]: E0131 16:39:35.892831 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerName="ovnkube-controller" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.892842 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerName="ovnkube-controller" Jan 31 16:39:35 crc kubenswrapper[4769]: E0131 16:39:35.892854 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerName="ovn-controller" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.892862 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerName="ovn-controller" Jan 31 16:39:35 crc kubenswrapper[4769]: E0131 16:39:35.892861 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-g5kbw_openshift-multus(4a7cfe09-9892-494d-a420-5d720afb3df3)\"" pod="openshift-multus/multus-g5kbw" podUID="4a7cfe09-9892-494d-a420-5d720afb3df3" Jan 31 16:39:35 crc kubenswrapper[4769]: E0131 
16:39:35.892874 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerName="ovn-acl-logging" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.892908 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerName="ovn-acl-logging" Jan 31 16:39:35 crc kubenswrapper[4769]: E0131 16:39:35.892937 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerName="kubecfg-setup" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.892946 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerName="kubecfg-setup" Jan 31 16:39:35 crc kubenswrapper[4769]: E0131 16:39:35.893201 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerName="nbdb" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.893211 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerName="nbdb" Jan 31 16:39:35 crc kubenswrapper[4769]: E0131 16:39:35.893220 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerName="sbdb" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.893228 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerName="sbdb" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.893641 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerName="ovn-acl-logging" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.893665 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerName="nbdb" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.893683 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerName="kube-rbac-proxy-node" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.893705 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerName="ovn-controller" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.893724 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerName="ovnkube-controller" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.893734 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerName="ovnkube-controller" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.893743 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerName="ovnkube-controller" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.893762 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerName="kube-rbac-proxy-ovn-metrics" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.893774 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerName="northd" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.893783 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerName="sbdb" Jan 31 16:39:35 crc 
kubenswrapper[4769]: I0131 16:39:35.893795 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerName="ovnkube-controller" Jan 31 16:39:35 crc kubenswrapper[4769]: E0131 16:39:35.894121 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerName="ovnkube-controller" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.894137 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerName="ovnkube-controller" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.894488 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" containerName="ovnkube-controller" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.903571 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.904227 4769 scope.go:117] "RemoveContainer" containerID="a2f542bc61e702fd04e7f702af083e41309d9ba14e7edaf90b0e34a9b1ab7b53" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.929960 4769 scope.go:117] "RemoveContainer" containerID="e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.932756 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-host-slash\") pod \"86f2019b-d6ca-4e73-9dac-52fe746489cb\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.932826 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-host-run-netns\") pod \"86f2019b-d6ca-4e73-9dac-52fe746489cb\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.932864 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-run-openvswitch\") pod \"86f2019b-d6ca-4e73-9dac-52fe746489cb\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.932900 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-host-slash" (OuterVolumeSpecName: "host-slash") pod "86f2019b-d6ca-4e73-9dac-52fe746489cb" (UID: "86f2019b-d6ca-4e73-9dac-52fe746489cb"). InnerVolumeSpecName "host-slash". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.932918 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/86f2019b-d6ca-4e73-9dac-52fe746489cb-ovn-node-metrics-cert\") pod \"86f2019b-d6ca-4e73-9dac-52fe746489cb\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.933032 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-etc-openvswitch\") pod \"86f2019b-d6ca-4e73-9dac-52fe746489cb\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.933075 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-host-kubelet\") pod \"86f2019b-d6ca-4e73-9dac-52fe746489cb\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.933132 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-systemd-units\") pod \"86f2019b-d6ca-4e73-9dac-52fe746489cb\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.932961 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "86f2019b-d6ca-4e73-9dac-52fe746489cb" (UID: "86f2019b-d6ca-4e73-9dac-52fe746489cb"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.933174 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-host-run-ovn-kubernetes\") pod \"86f2019b-d6ca-4e73-9dac-52fe746489cb\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.933052 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "86f2019b-d6ca-4e73-9dac-52fe746489cb" (UID: "86f2019b-d6ca-4e73-9dac-52fe746489cb"). InnerVolumeSpecName "run-openvswitch". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.933241 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/86f2019b-d6ca-4e73-9dac-52fe746489cb-ovnkube-config\") pod \"86f2019b-d6ca-4e73-9dac-52fe746489cb\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.933287 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-var-lib-openvswitch\") pod \"86f2019b-d6ca-4e73-9dac-52fe746489cb\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.933367 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-run-ovn\") pod \"86f2019b-d6ca-4e73-9dac-52fe746489cb\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.933408 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-host-cni-netd\") pod \"86f2019b-d6ca-4e73-9dac-52fe746489cb\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.933450 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-run-systemd\") pod \"86f2019b-d6ca-4e73-9dac-52fe746489cb\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.933480 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-log-socket\") pod \"86f2019b-d6ca-4e73-9dac-52fe746489cb\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.933539 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-node-log\") pod \"86f2019b-d6ca-4e73-9dac-52fe746489cb\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.933574 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/86f2019b-d6ca-4e73-9dac-52fe746489cb-env-overrides\") pod \"86f2019b-d6ca-4e73-9dac-52fe746489cb\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.933606 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/86f2019b-d6ca-4e73-9dac-52fe746489cb-ovnkube-script-lib\") pod \"86f2019b-d6ca-4e73-9dac-52fe746489cb\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.933649 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z9jb8\" (UniqueName: \"kubernetes.io/projected/86f2019b-d6ca-4e73-9dac-52fe746489cb-kube-api-access-z9jb8\") pod \"86f2019b-d6ca-4e73-9dac-52fe746489cb\" (UID: 
\"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.933680 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-host-var-lib-cni-networks-ovn-kubernetes\") pod \"86f2019b-d6ca-4e73-9dac-52fe746489cb\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.933719 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-host-cni-bin\") pod \"86f2019b-d6ca-4e73-9dac-52fe746489cb\" (UID: \"86f2019b-d6ca-4e73-9dac-52fe746489cb\") " Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.934148 4769 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-host-run-netns\") on node \"crc\" DevicePath \"\"" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.934181 4769 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-run-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.934200 4769 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-host-slash\") on node \"crc\" DevicePath \"\"" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.933104 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "86f2019b-d6ca-4e73-9dac-52fe746489cb" (UID: "86f2019b-d6ca-4e73-9dac-52fe746489cb"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.933135 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "86f2019b-d6ca-4e73-9dac-52fe746489cb" (UID: "86f2019b-d6ca-4e73-9dac-52fe746489cb"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.933198 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "86f2019b-d6ca-4e73-9dac-52fe746489cb" (UID: "86f2019b-d6ca-4e73-9dac-52fe746489cb"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.933227 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "86f2019b-d6ca-4e73-9dac-52fe746489cb" (UID: "86f2019b-d6ca-4e73-9dac-52fe746489cb"). InnerVolumeSpecName "host-run-ovn-kubernetes". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.933638 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "86f2019b-d6ca-4e73-9dac-52fe746489cb" (UID: "86f2019b-d6ca-4e73-9dac-52fe746489cb"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.933674 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "86f2019b-d6ca-4e73-9dac-52fe746489cb" (UID: "86f2019b-d6ca-4e73-9dac-52fe746489cb"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.933690 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "86f2019b-d6ca-4e73-9dac-52fe746489cb" (UID: "86f2019b-d6ca-4e73-9dac-52fe746489cb"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.933837 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/86f2019b-d6ca-4e73-9dac-52fe746489cb-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "86f2019b-d6ca-4e73-9dac-52fe746489cb" (UID: "86f2019b-d6ca-4e73-9dac-52fe746489cb"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.934054 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/86f2019b-d6ca-4e73-9dac-52fe746489cb-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "86f2019b-d6ca-4e73-9dac-52fe746489cb" (UID: "86f2019b-d6ca-4e73-9dac-52fe746489cb"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.934247 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "86f2019b-d6ca-4e73-9dac-52fe746489cb" (UID: "86f2019b-d6ca-4e73-9dac-52fe746489cb"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.934384 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "86f2019b-d6ca-4e73-9dac-52fe746489cb" (UID: "86f2019b-d6ca-4e73-9dac-52fe746489cb"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.934420 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-log-socket" (OuterVolumeSpecName: "log-socket") pod "86f2019b-d6ca-4e73-9dac-52fe746489cb" (UID: "86f2019b-d6ca-4e73-9dac-52fe746489cb"). 
InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.934457 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-node-log" (OuterVolumeSpecName: "node-log") pod "86f2019b-d6ca-4e73-9dac-52fe746489cb" (UID: "86f2019b-d6ca-4e73-9dac-52fe746489cb"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.935212 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/86f2019b-d6ca-4e73-9dac-52fe746489cb-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "86f2019b-d6ca-4e73-9dac-52fe746489cb" (UID: "86f2019b-d6ca-4e73-9dac-52fe746489cb"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.938418 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/86f2019b-d6ca-4e73-9dac-52fe746489cb-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "86f2019b-d6ca-4e73-9dac-52fe746489cb" (UID: "86f2019b-d6ca-4e73-9dac-52fe746489cb"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.938989 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/86f2019b-d6ca-4e73-9dac-52fe746489cb-kube-api-access-z9jb8" (OuterVolumeSpecName: "kube-api-access-z9jb8") pod "86f2019b-d6ca-4e73-9dac-52fe746489cb" (UID: "86f2019b-d6ca-4e73-9dac-52fe746489cb"). InnerVolumeSpecName "kube-api-access-z9jb8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.945824 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "86f2019b-d6ca-4e73-9dac-52fe746489cb" (UID: "86f2019b-d6ca-4e73-9dac-52fe746489cb"). InnerVolumeSpecName "run-systemd". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.965279 4769 scope.go:117] "RemoveContainer" containerID="a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.976765 4769 scope.go:117] "RemoveContainer" containerID="81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e" Jan 31 16:39:35 crc kubenswrapper[4769]: I0131 16:39:35.987149 4769 scope.go:117] "RemoveContainer" containerID="0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.005805 4769 scope.go:117] "RemoveContainer" containerID="7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.021626 4769 scope.go:117] "RemoveContainer" containerID="ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.035831 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/9702dd23-2515-438e-b1e3-ddbbcbece1f3-host-slash\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.035879 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/9702dd23-2515-438e-b1e3-ddbbcbece1f3-ovnkube-script-lib\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.036006 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9702dd23-2515-438e-b1e3-ddbbcbece1f3-etc-openvswitch\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.036080 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/9702dd23-2515-438e-b1e3-ddbbcbece1f3-run-systemd\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.036107 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9702dd23-2515-438e-b1e3-ddbbcbece1f3-run-openvswitch\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.036126 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/9702dd23-2515-438e-b1e3-ddbbcbece1f3-ovnkube-config\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.036142 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" 
(UniqueName: \"kubernetes.io/configmap/9702dd23-2515-438e-b1e3-ddbbcbece1f3-env-overrides\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.036175 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/9702dd23-2515-438e-b1e3-ddbbcbece1f3-systemd-units\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.036254 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9702dd23-2515-438e-b1e3-ddbbcbece1f3-host-run-ovn-kubernetes\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.036315 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/9702dd23-2515-438e-b1e3-ddbbcbece1f3-node-log\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.036335 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/9702dd23-2515-438e-b1e3-ddbbcbece1f3-log-socket\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.036380 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9702dd23-2515-438e-b1e3-ddbbcbece1f3-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.036404 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wsfmp\" (UniqueName: \"kubernetes.io/projected/9702dd23-2515-438e-b1e3-ddbbcbece1f3-kube-api-access-wsfmp\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.036434 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/9702dd23-2515-438e-b1e3-ddbbcbece1f3-host-kubelet\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.036456 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/9702dd23-2515-438e-b1e3-ddbbcbece1f3-run-ovn\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.036601 
4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/9702dd23-2515-438e-b1e3-ddbbcbece1f3-host-cni-netd\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.036640 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/9702dd23-2515-438e-b1e3-ddbbcbece1f3-ovn-node-metrics-cert\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.036785 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/9702dd23-2515-438e-b1e3-ddbbcbece1f3-host-cni-bin\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.036882 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9702dd23-2515-438e-b1e3-ddbbcbece1f3-var-lib-openvswitch\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.036920 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/9702dd23-2515-438e-b1e3-ddbbcbece1f3-host-run-netns\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.037061 4769 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.037082 4769 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-host-cni-netd\") on node \"crc\" DevicePath \"\"" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.037095 4769 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-log-socket\") on node \"crc\" DevicePath \"\"" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.037106 4769 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-run-systemd\") on node \"crc\" DevicePath \"\"" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.037115 4769 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-node-log\") on node \"crc\" DevicePath \"\"" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.037125 4769 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/86f2019b-d6ca-4e73-9dac-52fe746489cb-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 31 16:39:36 crc 
kubenswrapper[4769]: I0131 16:39:36.037136 4769 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/86f2019b-d6ca-4e73-9dac-52fe746489cb-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.037147 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z9jb8\" (UniqueName: \"kubernetes.io/projected/86f2019b-d6ca-4e73-9dac-52fe746489cb-kube-api-access-z9jb8\") on node \"crc\" DevicePath \"\"" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.037159 4769 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.037171 4769 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-host-cni-bin\") on node \"crc\" DevicePath \"\"" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.037182 4769 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/86f2019b-d6ca-4e73-9dac-52fe746489cb-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.037193 4769 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.037202 4769 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-host-kubelet\") on node \"crc\" DevicePath \"\"" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.037212 4769 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-systemd-units\") on node \"crc\" DevicePath \"\"" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.037221 4769 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.037232 4769 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/86f2019b-d6ca-4e73-9dac-52fe746489cb-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.037240 4769 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/86f2019b-d6ca-4e73-9dac-52fe746489cb-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.038390 4769 scope.go:117] "RemoveContainer" containerID="e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.058843 4769 scope.go:117] "RemoveContainer" containerID="8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.073905 4769 scope.go:117] "RemoveContainer" 
containerID="5a289c283ed37ae1109445a2935ece7ff8211c5453a1f152157da1236035f205" Jan 31 16:39:36 crc kubenswrapper[4769]: E0131 16:39:36.074260 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5a289c283ed37ae1109445a2935ece7ff8211c5453a1f152157da1236035f205\": container with ID starting with 5a289c283ed37ae1109445a2935ece7ff8211c5453a1f152157da1236035f205 not found: ID does not exist" containerID="5a289c283ed37ae1109445a2935ece7ff8211c5453a1f152157da1236035f205" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.074311 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5a289c283ed37ae1109445a2935ece7ff8211c5453a1f152157da1236035f205"} err="failed to get container status \"5a289c283ed37ae1109445a2935ece7ff8211c5453a1f152157da1236035f205\": rpc error: code = NotFound desc = could not find container \"5a289c283ed37ae1109445a2935ece7ff8211c5453a1f152157da1236035f205\": container with ID starting with 5a289c283ed37ae1109445a2935ece7ff8211c5453a1f152157da1236035f205 not found: ID does not exist" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.074342 4769 scope.go:117] "RemoveContainer" containerID="a2f542bc61e702fd04e7f702af083e41309d9ba14e7edaf90b0e34a9b1ab7b53" Jan 31 16:39:36 crc kubenswrapper[4769]: E0131 16:39:36.074646 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a2f542bc61e702fd04e7f702af083e41309d9ba14e7edaf90b0e34a9b1ab7b53\": container with ID starting with a2f542bc61e702fd04e7f702af083e41309d9ba14e7edaf90b0e34a9b1ab7b53 not found: ID does not exist" containerID="a2f542bc61e702fd04e7f702af083e41309d9ba14e7edaf90b0e34a9b1ab7b53" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.074669 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a2f542bc61e702fd04e7f702af083e41309d9ba14e7edaf90b0e34a9b1ab7b53"} err="failed to get container status \"a2f542bc61e702fd04e7f702af083e41309d9ba14e7edaf90b0e34a9b1ab7b53\": rpc error: code = NotFound desc = could not find container \"a2f542bc61e702fd04e7f702af083e41309d9ba14e7edaf90b0e34a9b1ab7b53\": container with ID starting with a2f542bc61e702fd04e7f702af083e41309d9ba14e7edaf90b0e34a9b1ab7b53 not found: ID does not exist" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.074682 4769 scope.go:117] "RemoveContainer" containerID="e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f" Jan 31 16:39:36 crc kubenswrapper[4769]: E0131 16:39:36.074908 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f\": container with ID starting with e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f not found: ID does not exist" containerID="e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.074936 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f"} err="failed to get container status \"e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f\": rpc error: code = NotFound desc = could not find container \"e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f\": container with ID starting with 
e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f not found: ID does not exist" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.074953 4769 scope.go:117] "RemoveContainer" containerID="a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd" Jan 31 16:39:36 crc kubenswrapper[4769]: E0131 16:39:36.075162 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd\": container with ID starting with a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd not found: ID does not exist" containerID="a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.075197 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd"} err="failed to get container status \"a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd\": rpc error: code = NotFound desc = could not find container \"a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd\": container with ID starting with a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd not found: ID does not exist" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.075221 4769 scope.go:117] "RemoveContainer" containerID="81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e" Jan 31 16:39:36 crc kubenswrapper[4769]: E0131 16:39:36.075411 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e\": container with ID starting with 81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e not found: ID does not exist" containerID="81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.075443 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e"} err="failed to get container status \"81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e\": rpc error: code = NotFound desc = could not find container \"81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e\": container with ID starting with 81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e not found: ID does not exist" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.075460 4769 scope.go:117] "RemoveContainer" containerID="0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8" Jan 31 16:39:36 crc kubenswrapper[4769]: E0131 16:39:36.075637 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8\": container with ID starting with 0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8 not found: ID does not exist" containerID="0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.075660 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8"} err="failed to get container status \"0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8\": rpc 
error: code = NotFound desc = could not find container \"0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8\": container with ID starting with 0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8 not found: ID does not exist" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.075678 4769 scope.go:117] "RemoveContainer" containerID="7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3" Jan 31 16:39:36 crc kubenswrapper[4769]: E0131 16:39:36.075867 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3\": container with ID starting with 7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3 not found: ID does not exist" containerID="7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.075885 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3"} err="failed to get container status \"7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3\": rpc error: code = NotFound desc = could not find container \"7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3\": container with ID starting with 7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3 not found: ID does not exist" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.075898 4769 scope.go:117] "RemoveContainer" containerID="ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6" Jan 31 16:39:36 crc kubenswrapper[4769]: E0131 16:39:36.076082 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6\": container with ID starting with ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6 not found: ID does not exist" containerID="ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.076109 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6"} err="failed to get container status \"ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6\": rpc error: code = NotFound desc = could not find container \"ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6\": container with ID starting with ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6 not found: ID does not exist" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.076124 4769 scope.go:117] "RemoveContainer" containerID="e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67" Jan 31 16:39:36 crc kubenswrapper[4769]: E0131 16:39:36.076295 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67\": container with ID starting with e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67 not found: ID does not exist" containerID="e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.076321 4769 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67"} err="failed to get container status \"e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67\": rpc error: code = NotFound desc = could not find container \"e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67\": container with ID starting with e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67 not found: ID does not exist" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.076338 4769 scope.go:117] "RemoveContainer" containerID="8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85" Jan 31 16:39:36 crc kubenswrapper[4769]: E0131 16:39:36.076519 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\": container with ID starting with 8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85 not found: ID does not exist" containerID="8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.076539 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85"} err="failed to get container status \"8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\": rpc error: code = NotFound desc = could not find container \"8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\": container with ID starting with 8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85 not found: ID does not exist" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.076552 4769 scope.go:117] "RemoveContainer" containerID="5a289c283ed37ae1109445a2935ece7ff8211c5453a1f152157da1236035f205" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.076730 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5a289c283ed37ae1109445a2935ece7ff8211c5453a1f152157da1236035f205"} err="failed to get container status \"5a289c283ed37ae1109445a2935ece7ff8211c5453a1f152157da1236035f205\": rpc error: code = NotFound desc = could not find container \"5a289c283ed37ae1109445a2935ece7ff8211c5453a1f152157da1236035f205\": container with ID starting with 5a289c283ed37ae1109445a2935ece7ff8211c5453a1f152157da1236035f205 not found: ID does not exist" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.076751 4769 scope.go:117] "RemoveContainer" containerID="a2f542bc61e702fd04e7f702af083e41309d9ba14e7edaf90b0e34a9b1ab7b53" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.076971 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a2f542bc61e702fd04e7f702af083e41309d9ba14e7edaf90b0e34a9b1ab7b53"} err="failed to get container status \"a2f542bc61e702fd04e7f702af083e41309d9ba14e7edaf90b0e34a9b1ab7b53\": rpc error: code = NotFound desc = could not find container \"a2f542bc61e702fd04e7f702af083e41309d9ba14e7edaf90b0e34a9b1ab7b53\": container with ID starting with a2f542bc61e702fd04e7f702af083e41309d9ba14e7edaf90b0e34a9b1ab7b53 not found: ID does not exist" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.076990 4769 scope.go:117] "RemoveContainer" containerID="e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.077140 4769 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f"} err="failed to get container status \"e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f\": rpc error: code = NotFound desc = could not find container \"e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f\": container with ID starting with e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f not found: ID does not exist" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.077200 4769 scope.go:117] "RemoveContainer" containerID="a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.077457 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd"} err="failed to get container status \"a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd\": rpc error: code = NotFound desc = could not find container \"a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd\": container with ID starting with a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd not found: ID does not exist" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.077478 4769 scope.go:117] "RemoveContainer" containerID="81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.077735 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e"} err="failed to get container status \"81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e\": rpc error: code = NotFound desc = could not find container \"81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e\": container with ID starting with 81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e not found: ID does not exist" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.077792 4769 scope.go:117] "RemoveContainer" containerID="0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.078088 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8"} err="failed to get container status \"0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8\": rpc error: code = NotFound desc = could not find container \"0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8\": container with ID starting with 0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8 not found: ID does not exist" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.078108 4769 scope.go:117] "RemoveContainer" containerID="7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.078355 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3"} err="failed to get container status \"7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3\": rpc error: code = NotFound desc = could not find container \"7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3\": container with ID starting with 7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3 not found: ID does not exist" Jan 
31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.078383 4769 scope.go:117] "RemoveContainer" containerID="ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.078619 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6"} err="failed to get container status \"ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6\": rpc error: code = NotFound desc = could not find container \"ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6\": container with ID starting with ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6 not found: ID does not exist" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.078638 4769 scope.go:117] "RemoveContainer" containerID="e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.078853 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67"} err="failed to get container status \"e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67\": rpc error: code = NotFound desc = could not find container \"e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67\": container with ID starting with e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67 not found: ID does not exist" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.078882 4769 scope.go:117] "RemoveContainer" containerID="8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.079114 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85"} err="failed to get container status \"8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\": rpc error: code = NotFound desc = could not find container \"8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\": container with ID starting with 8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85 not found: ID does not exist" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.079133 4769 scope.go:117] "RemoveContainer" containerID="5a289c283ed37ae1109445a2935ece7ff8211c5453a1f152157da1236035f205" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.079324 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5a289c283ed37ae1109445a2935ece7ff8211c5453a1f152157da1236035f205"} err="failed to get container status \"5a289c283ed37ae1109445a2935ece7ff8211c5453a1f152157da1236035f205\": rpc error: code = NotFound desc = could not find container \"5a289c283ed37ae1109445a2935ece7ff8211c5453a1f152157da1236035f205\": container with ID starting with 5a289c283ed37ae1109445a2935ece7ff8211c5453a1f152157da1236035f205 not found: ID does not exist" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.079347 4769 scope.go:117] "RemoveContainer" containerID="a2f542bc61e702fd04e7f702af083e41309d9ba14e7edaf90b0e34a9b1ab7b53" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.079545 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a2f542bc61e702fd04e7f702af083e41309d9ba14e7edaf90b0e34a9b1ab7b53"} err="failed to get container status 
\"a2f542bc61e702fd04e7f702af083e41309d9ba14e7edaf90b0e34a9b1ab7b53\": rpc error: code = NotFound desc = could not find container \"a2f542bc61e702fd04e7f702af083e41309d9ba14e7edaf90b0e34a9b1ab7b53\": container with ID starting with a2f542bc61e702fd04e7f702af083e41309d9ba14e7edaf90b0e34a9b1ab7b53 not found: ID does not exist" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.079565 4769 scope.go:117] "RemoveContainer" containerID="e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.079729 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f"} err="failed to get container status \"e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f\": rpc error: code = NotFound desc = could not find container \"e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f\": container with ID starting with e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f not found: ID does not exist" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.079757 4769 scope.go:117] "RemoveContainer" containerID="a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.079962 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd"} err="failed to get container status \"a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd\": rpc error: code = NotFound desc = could not find container \"a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd\": container with ID starting with a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd not found: ID does not exist" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.079982 4769 scope.go:117] "RemoveContainer" containerID="81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.080147 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e"} err="failed to get container status \"81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e\": rpc error: code = NotFound desc = could not find container \"81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e\": container with ID starting with 81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e not found: ID does not exist" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.080165 4769 scope.go:117] "RemoveContainer" containerID="0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.080337 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8"} err="failed to get container status \"0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8\": rpc error: code = NotFound desc = could not find container \"0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8\": container with ID starting with 0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8 not found: ID does not exist" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.080361 4769 scope.go:117] "RemoveContainer" 
containerID="7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.080575 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3"} err="failed to get container status \"7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3\": rpc error: code = NotFound desc = could not find container \"7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3\": container with ID starting with 7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3 not found: ID does not exist" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.080593 4769 scope.go:117] "RemoveContainer" containerID="ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.080748 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6"} err="failed to get container status \"ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6\": rpc error: code = NotFound desc = could not find container \"ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6\": container with ID starting with ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6 not found: ID does not exist" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.080765 4769 scope.go:117] "RemoveContainer" containerID="e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.080955 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67"} err="failed to get container status \"e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67\": rpc error: code = NotFound desc = could not find container \"e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67\": container with ID starting with e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67 not found: ID does not exist" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.080971 4769 scope.go:117] "RemoveContainer" containerID="8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.081140 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85"} err="failed to get container status \"8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\": rpc error: code = NotFound desc = could not find container \"8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\": container with ID starting with 8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85 not found: ID does not exist" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.081159 4769 scope.go:117] "RemoveContainer" containerID="5a289c283ed37ae1109445a2935ece7ff8211c5453a1f152157da1236035f205" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.081350 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5a289c283ed37ae1109445a2935ece7ff8211c5453a1f152157da1236035f205"} err="failed to get container status \"5a289c283ed37ae1109445a2935ece7ff8211c5453a1f152157da1236035f205\": rpc error: code = NotFound desc = could not find 
container \"5a289c283ed37ae1109445a2935ece7ff8211c5453a1f152157da1236035f205\": container with ID starting with 5a289c283ed37ae1109445a2935ece7ff8211c5453a1f152157da1236035f205 not found: ID does not exist" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.081375 4769 scope.go:117] "RemoveContainer" containerID="a2f542bc61e702fd04e7f702af083e41309d9ba14e7edaf90b0e34a9b1ab7b53" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.081556 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a2f542bc61e702fd04e7f702af083e41309d9ba14e7edaf90b0e34a9b1ab7b53"} err="failed to get container status \"a2f542bc61e702fd04e7f702af083e41309d9ba14e7edaf90b0e34a9b1ab7b53\": rpc error: code = NotFound desc = could not find container \"a2f542bc61e702fd04e7f702af083e41309d9ba14e7edaf90b0e34a9b1ab7b53\": container with ID starting with a2f542bc61e702fd04e7f702af083e41309d9ba14e7edaf90b0e34a9b1ab7b53 not found: ID does not exist" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.081577 4769 scope.go:117] "RemoveContainer" containerID="e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.081738 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f"} err="failed to get container status \"e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f\": rpc error: code = NotFound desc = could not find container \"e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f\": container with ID starting with e75d0c8304e432af7c88f0035de29d32b39ca6fa31a9f6ab5d713ab65620b51f not found: ID does not exist" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.081755 4769 scope.go:117] "RemoveContainer" containerID="a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.081915 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd"} err="failed to get container status \"a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd\": rpc error: code = NotFound desc = could not find container \"a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd\": container with ID starting with a6924da02d58862ea28e02a64bc876698168e3d7a8d1dbe07e15fc82b1336dfd not found: ID does not exist" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.081932 4769 scope.go:117] "RemoveContainer" containerID="81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.082120 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e"} err="failed to get container status \"81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e\": rpc error: code = NotFound desc = could not find container \"81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e\": container with ID starting with 81d5e019a569c669817d43449d1cc23770842638bc452cb9b1435f3cccd6a59e not found: ID does not exist" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.082143 4769 scope.go:117] "RemoveContainer" containerID="0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.082318 4769 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8"} err="failed to get container status \"0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8\": rpc error: code = NotFound desc = could not find container \"0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8\": container with ID starting with 0e6d91b7f1ff7bff7cecd5554e93ce9356a64734185882cbe93e0e350fee46c8 not found: ID does not exist" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.082345 4769 scope.go:117] "RemoveContainer" containerID="7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.082552 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3"} err="failed to get container status \"7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3\": rpc error: code = NotFound desc = could not find container \"7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3\": container with ID starting with 7bde3146b86b1021bac363a82fc3527651ef666251fb585b0746339112893ae3 not found: ID does not exist" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.082574 4769 scope.go:117] "RemoveContainer" containerID="ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.082751 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6"} err="failed to get container status \"ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6\": rpc error: code = NotFound desc = could not find container \"ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6\": container with ID starting with ec262e62e673f4c213793bd5706b5a6819c057f30c73617afe8b41df487209a6 not found: ID does not exist" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.082769 4769 scope.go:117] "RemoveContainer" containerID="e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.083009 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67"} err="failed to get container status \"e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67\": rpc error: code = NotFound desc = could not find container \"e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67\": container with ID starting with e9d3c6a57b02af3bdd7039185ceb4c3a7927c5655e362871393de2cfba46ec67 not found: ID does not exist" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.083050 4769 scope.go:117] "RemoveContainer" containerID="8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.083248 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85"} err="failed to get container status \"8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\": rpc error: code = NotFound desc = could not find container \"8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85\": container with ID starting with 
8c15fa6064b305fd57559594bf74f1ee1b45ec57ed93dda5015c7adcb277ef85 not found: ID does not exist" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.083266 4769 scope.go:117] "RemoveContainer" containerID="5a289c283ed37ae1109445a2935ece7ff8211c5453a1f152157da1236035f205" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.083445 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5a289c283ed37ae1109445a2935ece7ff8211c5453a1f152157da1236035f205"} err="failed to get container status \"5a289c283ed37ae1109445a2935ece7ff8211c5453a1f152157da1236035f205\": rpc error: code = NotFound desc = could not find container \"5a289c283ed37ae1109445a2935ece7ff8211c5453a1f152157da1236035f205\": container with ID starting with 5a289c283ed37ae1109445a2935ece7ff8211c5453a1f152157da1236035f205 not found: ID does not exist" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.138342 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9702dd23-2515-438e-b1e3-ddbbcbece1f3-etc-openvswitch\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.138389 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/9702dd23-2515-438e-b1e3-ddbbcbece1f3-run-systemd\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.138413 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9702dd23-2515-438e-b1e3-ddbbcbece1f3-run-openvswitch\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.138429 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/9702dd23-2515-438e-b1e3-ddbbcbece1f3-ovnkube-config\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.138443 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/9702dd23-2515-438e-b1e3-ddbbcbece1f3-env-overrides\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.138460 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/9702dd23-2515-438e-b1e3-ddbbcbece1f3-systemd-units\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.138481 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9702dd23-2515-438e-b1e3-ddbbcbece1f3-host-run-ovn-kubernetes\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.138535 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/9702dd23-2515-438e-b1e3-ddbbcbece1f3-node-log\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.138550 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/9702dd23-2515-438e-b1e3-ddbbcbece1f3-log-socket\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.138557 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9702dd23-2515-438e-b1e3-ddbbcbece1f3-etc-openvswitch\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.138599 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/9702dd23-2515-438e-b1e3-ddbbcbece1f3-run-systemd\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.138556 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9702dd23-2515-438e-b1e3-ddbbcbece1f3-run-openvswitch\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.138568 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9702dd23-2515-438e-b1e3-ddbbcbece1f3-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.138605 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9702dd23-2515-438e-b1e3-ddbbcbece1f3-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.138678 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wsfmp\" (UniqueName: \"kubernetes.io/projected/9702dd23-2515-438e-b1e3-ddbbcbece1f3-kube-api-access-wsfmp\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.138715 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/9702dd23-2515-438e-b1e3-ddbbcbece1f3-host-kubelet\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.138733 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/9702dd23-2515-438e-b1e3-ddbbcbece1f3-node-log\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.138737 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9702dd23-2515-438e-b1e3-ddbbcbece1f3-host-run-ovn-kubernetes\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.138766 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/9702dd23-2515-438e-b1e3-ddbbcbece1f3-log-socket\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.138783 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/9702dd23-2515-438e-b1e3-ddbbcbece1f3-run-ovn\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.138751 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/9702dd23-2515-438e-b1e3-ddbbcbece1f3-run-ovn\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.138816 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/9702dd23-2515-438e-b1e3-ddbbcbece1f3-host-kubelet\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.138781 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/9702dd23-2515-438e-b1e3-ddbbcbece1f3-systemd-units\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.138888 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/9702dd23-2515-438e-b1e3-ddbbcbece1f3-host-cni-netd\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.138925 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/9702dd23-2515-438e-b1e3-ddbbcbece1f3-ovn-node-metrics-cert\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.138985 4769 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/9702dd23-2515-438e-b1e3-ddbbcbece1f3-host-cni-bin\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.139015 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/9702dd23-2515-438e-b1e3-ddbbcbece1f3-host-cni-netd\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.139041 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9702dd23-2515-438e-b1e3-ddbbcbece1f3-var-lib-openvswitch\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.139056 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/9702dd23-2515-438e-b1e3-ddbbcbece1f3-host-cni-bin\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.139084 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/9702dd23-2515-438e-b1e3-ddbbcbece1f3-host-run-netns\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.139119 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/9702dd23-2515-438e-b1e3-ddbbcbece1f3-host-slash\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.139143 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/9702dd23-2515-438e-b1e3-ddbbcbece1f3-ovnkube-script-lib\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.139199 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/9702dd23-2515-438e-b1e3-ddbbcbece1f3-host-run-netns\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.139229 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9702dd23-2515-438e-b1e3-ddbbcbece1f3-var-lib-openvswitch\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.139258 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: 
\"kubernetes.io/host-path/9702dd23-2515-438e-b1e3-ddbbcbece1f3-host-slash\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.139996 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/9702dd23-2515-438e-b1e3-ddbbcbece1f3-env-overrides\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.140309 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/9702dd23-2515-438e-b1e3-ddbbcbece1f3-ovnkube-script-lib\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.140450 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/9702dd23-2515-438e-b1e3-ddbbcbece1f3-ovnkube-config\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.142893 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/9702dd23-2515-438e-b1e3-ddbbcbece1f3-ovn-node-metrics-cert\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.163001 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wsfmp\" (UniqueName: \"kubernetes.io/projected/9702dd23-2515-438e-b1e3-ddbbcbece1f3-kube-api-access-wsfmp\") pod \"ovnkube-node-g7kwz\" (UID: \"9702dd23-2515-438e-b1e3-ddbbcbece1f3\") " pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.211595 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-2r9tc"] Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.212182 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-2r9tc"] Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.239082 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:36 crc kubenswrapper[4769]: W0131 16:39:36.254883 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9702dd23_2515_438e_b1e3_ddbbcbece1f3.slice/crio-a080a213eb192a5ada37a3d33fc7fc93684d336778498eab9295907b946462b7 WatchSource:0}: Error finding container a080a213eb192a5ada37a3d33fc7fc93684d336778498eab9295907b946462b7: Status 404 returned error can't find the container with id a080a213eb192a5ada37a3d33fc7fc93684d336778498eab9295907b946462b7 Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.720432 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="86f2019b-d6ca-4e73-9dac-52fe746489cb" path="/var/lib/kubelet/pods/86f2019b-d6ca-4e73-9dac-52fe746489cb/volumes" Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.898784 4769 generic.go:334] "Generic (PLEG): container finished" podID="9702dd23-2515-438e-b1e3-ddbbcbece1f3" containerID="864f9db470c518a7d2726f0047871a1cee4f3342d9073abe43f31c89094ff9c2" exitCode=0 Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.898838 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" event={"ID":"9702dd23-2515-438e-b1e3-ddbbcbece1f3","Type":"ContainerDied","Data":"864f9db470c518a7d2726f0047871a1cee4f3342d9073abe43f31c89094ff9c2"} Jan 31 16:39:36 crc kubenswrapper[4769]: I0131 16:39:36.898920 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" event={"ID":"9702dd23-2515-438e-b1e3-ddbbcbece1f3","Type":"ContainerStarted","Data":"a080a213eb192a5ada37a3d33fc7fc93684d336778498eab9295907b946462b7"} Jan 31 16:39:37 crc kubenswrapper[4769]: I0131 16:39:37.907342 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" event={"ID":"9702dd23-2515-438e-b1e3-ddbbcbece1f3","Type":"ContainerStarted","Data":"d5d20f58bc3765e4bcb2957bfc45e9a0177943d61a2aa623e4423f540ed57c64"} Jan 31 16:39:37 crc kubenswrapper[4769]: I0131 16:39:37.907775 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" event={"ID":"9702dd23-2515-438e-b1e3-ddbbcbece1f3","Type":"ContainerStarted","Data":"66b81e562dbe804c8f137b9f73d5f19f56db01890ed07d5e0c1b849463026463"} Jan 31 16:39:37 crc kubenswrapper[4769]: I0131 16:39:37.907786 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" event={"ID":"9702dd23-2515-438e-b1e3-ddbbcbece1f3","Type":"ContainerStarted","Data":"b2457affda313bb2fe9f3ffac26deb5994a8654d967744995d8d26d91a1be51f"} Jan 31 16:39:37 crc kubenswrapper[4769]: I0131 16:39:37.907795 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" event={"ID":"9702dd23-2515-438e-b1e3-ddbbcbece1f3","Type":"ContainerStarted","Data":"f4d188053dc7b6b272d0f4c73d221131e8f0117d03a513216d6aea05eb14242c"} Jan 31 16:39:37 crc kubenswrapper[4769]: I0131 16:39:37.907804 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" event={"ID":"9702dd23-2515-438e-b1e3-ddbbcbece1f3","Type":"ContainerStarted","Data":"04d4d0c4a9242bb7db3808cdde5c0178287f750c040f42fbb3ea20b37ad5e800"} Jan 31 16:39:37 crc kubenswrapper[4769]: I0131 16:39:37.907811 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" 
event={"ID":"9702dd23-2515-438e-b1e3-ddbbcbece1f3","Type":"ContainerStarted","Data":"d6cefbedf7155cfa201c60deb340223601bba8fd072e05316feb3c1487174cbc"} Jan 31 16:39:40 crc kubenswrapper[4769]: I0131 16:39:40.933124 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" event={"ID":"9702dd23-2515-438e-b1e3-ddbbcbece1f3","Type":"ContainerStarted","Data":"df31af93cbc38d4213c1fac046c49e10ce67e28629f3fffd4f4d54b9c9ed8900"} Jan 31 16:39:42 crc kubenswrapper[4769]: I0131 16:39:42.961947 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" event={"ID":"9702dd23-2515-438e-b1e3-ddbbcbece1f3","Type":"ContainerStarted","Data":"92e1661a79678499bdce246e70535ded3385c8980cd37839bce43c6e252f083a"} Jan 31 16:39:42 crc kubenswrapper[4769]: I0131 16:39:42.962644 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:42 crc kubenswrapper[4769]: I0131 16:39:42.962731 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:42 crc kubenswrapper[4769]: I0131 16:39:42.997704 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:43 crc kubenswrapper[4769]: I0131 16:39:43.002292 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" podStartSLOduration=8.002267836 podStartE2EDuration="8.002267836s" podCreationTimestamp="2026-01-31 16:39:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:39:42.995733845 +0000 UTC m=+631.069902594" watchObservedRunningTime="2026-01-31 16:39:43.002267836 +0000 UTC m=+631.076436535" Jan 31 16:39:43 crc kubenswrapper[4769]: I0131 16:39:43.970020 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:44 crc kubenswrapper[4769]: I0131 16:39:44.046154 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:39:48 crc kubenswrapper[4769]: I0131 16:39:48.943373 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-8fb6h"] Jan 31 16:39:48 crc kubenswrapper[4769]: I0131 16:39:48.944684 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-8fb6h" Jan 31 16:39:48 crc kubenswrapper[4769]: I0131 16:39:48.968188 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-8fb6h"] Jan 31 16:39:49 crc kubenswrapper[4769]: I0131 16:39:49.058915 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-8fb6h\" (UID: \"c5b5821e-100f-4010-b609-d47860550119\") " pod="openshift-image-registry/image-registry-66df7c8f76-8fb6h" Jan 31 16:39:49 crc kubenswrapper[4769]: I0131 16:39:49.059012 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/c5b5821e-100f-4010-b609-d47860550119-registry-tls\") pod \"image-registry-66df7c8f76-8fb6h\" (UID: \"c5b5821e-100f-4010-b609-d47860550119\") " pod="openshift-image-registry/image-registry-66df7c8f76-8fb6h" Jan 31 16:39:49 crc kubenswrapper[4769]: I0131 16:39:49.059052 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xwqgs\" (UniqueName: \"kubernetes.io/projected/c5b5821e-100f-4010-b609-d47860550119-kube-api-access-xwqgs\") pod \"image-registry-66df7c8f76-8fb6h\" (UID: \"c5b5821e-100f-4010-b609-d47860550119\") " pod="openshift-image-registry/image-registry-66df7c8f76-8fb6h" Jan 31 16:39:49 crc kubenswrapper[4769]: I0131 16:39:49.059094 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/c5b5821e-100f-4010-b609-d47860550119-registry-certificates\") pod \"image-registry-66df7c8f76-8fb6h\" (UID: \"c5b5821e-100f-4010-b609-d47860550119\") " pod="openshift-image-registry/image-registry-66df7c8f76-8fb6h" Jan 31 16:39:49 crc kubenswrapper[4769]: I0131 16:39:49.059180 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c5b5821e-100f-4010-b609-d47860550119-trusted-ca\") pod \"image-registry-66df7c8f76-8fb6h\" (UID: \"c5b5821e-100f-4010-b609-d47860550119\") " pod="openshift-image-registry/image-registry-66df7c8f76-8fb6h" Jan 31 16:39:49 crc kubenswrapper[4769]: I0131 16:39:49.059211 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/c5b5821e-100f-4010-b609-d47860550119-ca-trust-extracted\") pod \"image-registry-66df7c8f76-8fb6h\" (UID: \"c5b5821e-100f-4010-b609-d47860550119\") " pod="openshift-image-registry/image-registry-66df7c8f76-8fb6h" Jan 31 16:39:49 crc kubenswrapper[4769]: I0131 16:39:49.059322 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/c5b5821e-100f-4010-b609-d47860550119-installation-pull-secrets\") pod \"image-registry-66df7c8f76-8fb6h\" (UID: \"c5b5821e-100f-4010-b609-d47860550119\") " pod="openshift-image-registry/image-registry-66df7c8f76-8fb6h" Jan 31 16:39:49 crc kubenswrapper[4769]: I0131 16:39:49.059396 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: 
\"kubernetes.io/projected/c5b5821e-100f-4010-b609-d47860550119-bound-sa-token\") pod \"image-registry-66df7c8f76-8fb6h\" (UID: \"c5b5821e-100f-4010-b609-d47860550119\") " pod="openshift-image-registry/image-registry-66df7c8f76-8fb6h" Jan 31 16:39:49 crc kubenswrapper[4769]: I0131 16:39:49.087056 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-8fb6h\" (UID: \"c5b5821e-100f-4010-b609-d47860550119\") " pod="openshift-image-registry/image-registry-66df7c8f76-8fb6h" Jan 31 16:39:49 crc kubenswrapper[4769]: I0131 16:39:49.160640 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/c5b5821e-100f-4010-b609-d47860550119-registry-tls\") pod \"image-registry-66df7c8f76-8fb6h\" (UID: \"c5b5821e-100f-4010-b609-d47860550119\") " pod="openshift-image-registry/image-registry-66df7c8f76-8fb6h" Jan 31 16:39:49 crc kubenswrapper[4769]: I0131 16:39:49.160888 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xwqgs\" (UniqueName: \"kubernetes.io/projected/c5b5821e-100f-4010-b609-d47860550119-kube-api-access-xwqgs\") pod \"image-registry-66df7c8f76-8fb6h\" (UID: \"c5b5821e-100f-4010-b609-d47860550119\") " pod="openshift-image-registry/image-registry-66df7c8f76-8fb6h" Jan 31 16:39:49 crc kubenswrapper[4769]: I0131 16:39:49.160979 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/c5b5821e-100f-4010-b609-d47860550119-registry-certificates\") pod \"image-registry-66df7c8f76-8fb6h\" (UID: \"c5b5821e-100f-4010-b609-d47860550119\") " pod="openshift-image-registry/image-registry-66df7c8f76-8fb6h" Jan 31 16:39:49 crc kubenswrapper[4769]: I0131 16:39:49.161072 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/c5b5821e-100f-4010-b609-d47860550119-ca-trust-extracted\") pod \"image-registry-66df7c8f76-8fb6h\" (UID: \"c5b5821e-100f-4010-b609-d47860550119\") " pod="openshift-image-registry/image-registry-66df7c8f76-8fb6h" Jan 31 16:39:49 crc kubenswrapper[4769]: I0131 16:39:49.161153 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c5b5821e-100f-4010-b609-d47860550119-trusted-ca\") pod \"image-registry-66df7c8f76-8fb6h\" (UID: \"c5b5821e-100f-4010-b609-d47860550119\") " pod="openshift-image-registry/image-registry-66df7c8f76-8fb6h" Jan 31 16:39:49 crc kubenswrapper[4769]: I0131 16:39:49.161227 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/c5b5821e-100f-4010-b609-d47860550119-installation-pull-secrets\") pod \"image-registry-66df7c8f76-8fb6h\" (UID: \"c5b5821e-100f-4010-b609-d47860550119\") " pod="openshift-image-registry/image-registry-66df7c8f76-8fb6h" Jan 31 16:39:49 crc kubenswrapper[4769]: I0131 16:39:49.161309 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/c5b5821e-100f-4010-b609-d47860550119-bound-sa-token\") pod \"image-registry-66df7c8f76-8fb6h\" (UID: \"c5b5821e-100f-4010-b609-d47860550119\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-8fb6h" Jan 31 16:39:49 crc kubenswrapper[4769]: I0131 16:39:49.161602 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/c5b5821e-100f-4010-b609-d47860550119-ca-trust-extracted\") pod \"image-registry-66df7c8f76-8fb6h\" (UID: \"c5b5821e-100f-4010-b609-d47860550119\") " pod="openshift-image-registry/image-registry-66df7c8f76-8fb6h" Jan 31 16:39:49 crc kubenswrapper[4769]: I0131 16:39:49.162691 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c5b5821e-100f-4010-b609-d47860550119-trusted-ca\") pod \"image-registry-66df7c8f76-8fb6h\" (UID: \"c5b5821e-100f-4010-b609-d47860550119\") " pod="openshift-image-registry/image-registry-66df7c8f76-8fb6h" Jan 31 16:39:49 crc kubenswrapper[4769]: I0131 16:39:49.163016 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/c5b5821e-100f-4010-b609-d47860550119-registry-certificates\") pod \"image-registry-66df7c8f76-8fb6h\" (UID: \"c5b5821e-100f-4010-b609-d47860550119\") " pod="openshift-image-registry/image-registry-66df7c8f76-8fb6h" Jan 31 16:39:49 crc kubenswrapper[4769]: I0131 16:39:49.166871 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/c5b5821e-100f-4010-b609-d47860550119-installation-pull-secrets\") pod \"image-registry-66df7c8f76-8fb6h\" (UID: \"c5b5821e-100f-4010-b609-d47860550119\") " pod="openshift-image-registry/image-registry-66df7c8f76-8fb6h" Jan 31 16:39:49 crc kubenswrapper[4769]: I0131 16:39:49.167259 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/c5b5821e-100f-4010-b609-d47860550119-registry-tls\") pod \"image-registry-66df7c8f76-8fb6h\" (UID: \"c5b5821e-100f-4010-b609-d47860550119\") " pod="openshift-image-registry/image-registry-66df7c8f76-8fb6h" Jan 31 16:39:49 crc kubenswrapper[4769]: I0131 16:39:49.181702 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xwqgs\" (UniqueName: \"kubernetes.io/projected/c5b5821e-100f-4010-b609-d47860550119-kube-api-access-xwqgs\") pod \"image-registry-66df7c8f76-8fb6h\" (UID: \"c5b5821e-100f-4010-b609-d47860550119\") " pod="openshift-image-registry/image-registry-66df7c8f76-8fb6h" Jan 31 16:39:49 crc kubenswrapper[4769]: I0131 16:39:49.181830 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/c5b5821e-100f-4010-b609-d47860550119-bound-sa-token\") pod \"image-registry-66df7c8f76-8fb6h\" (UID: \"c5b5821e-100f-4010-b609-d47860550119\") " pod="openshift-image-registry/image-registry-66df7c8f76-8fb6h" Jan 31 16:39:49 crc kubenswrapper[4769]: I0131 16:39:49.274696 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-8fb6h" Jan 31 16:39:49 crc kubenswrapper[4769]: E0131 16:39:49.310707 4769 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_image-registry-66df7c8f76-8fb6h_openshift-image-registry_c5b5821e-100f-4010-b609-d47860550119_0(9f774cba52e624b19d0abf614b37569ccd1c6d5a4b7af5618202b87fc7a370be): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" Jan 31 16:39:49 crc kubenswrapper[4769]: E0131 16:39:49.310815 4769 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_image-registry-66df7c8f76-8fb6h_openshift-image-registry_c5b5821e-100f-4010-b609-d47860550119_0(9f774cba52e624b19d0abf614b37569ccd1c6d5a4b7af5618202b87fc7a370be): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-66df7c8f76-8fb6h" Jan 31 16:39:49 crc kubenswrapper[4769]: E0131 16:39:49.310873 4769 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_image-registry-66df7c8f76-8fb6h_openshift-image-registry_c5b5821e-100f-4010-b609-d47860550119_0(9f774cba52e624b19d0abf614b37569ccd1c6d5a4b7af5618202b87fc7a370be): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-66df7c8f76-8fb6h" Jan 31 16:39:49 crc kubenswrapper[4769]: E0131 16:39:49.310957 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"image-registry-66df7c8f76-8fb6h_openshift-image-registry(c5b5821e-100f-4010-b609-d47860550119)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"image-registry-66df7c8f76-8fb6h_openshift-image-registry(c5b5821e-100f-4010-b609-d47860550119)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_image-registry-66df7c8f76-8fb6h_openshift-image-registry_c5b5821e-100f-4010-b609-d47860550119_0(9f774cba52e624b19d0abf614b37569ccd1c6d5a4b7af5618202b87fc7a370be): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-image-registry/image-registry-66df7c8f76-8fb6h" podUID="c5b5821e-100f-4010-b609-d47860550119" Jan 31 16:39:50 crc kubenswrapper[4769]: I0131 16:39:50.006490 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-8fb6h" Jan 31 16:39:50 crc kubenswrapper[4769]: I0131 16:39:50.007146 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-8fb6h" Jan 31 16:39:50 crc kubenswrapper[4769]: E0131 16:39:50.038155 4769 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_image-registry-66df7c8f76-8fb6h_openshift-image-registry_c5b5821e-100f-4010-b609-d47860550119_0(b608fa09620713d8ddfc766636e77d2863a6779c4393e310ad1308762161efa2): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 31 16:39:50 crc kubenswrapper[4769]: E0131 16:39:50.038677 4769 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_image-registry-66df7c8f76-8fb6h_openshift-image-registry_c5b5821e-100f-4010-b609-d47860550119_0(b608fa09620713d8ddfc766636e77d2863a6779c4393e310ad1308762161efa2): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/image-registry-66df7c8f76-8fb6h" Jan 31 16:39:50 crc kubenswrapper[4769]: E0131 16:39:50.038728 4769 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_image-registry-66df7c8f76-8fb6h_openshift-image-registry_c5b5821e-100f-4010-b609-d47860550119_0(b608fa09620713d8ddfc766636e77d2863a6779c4393e310ad1308762161efa2): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-66df7c8f76-8fb6h" Jan 31 16:39:50 crc kubenswrapper[4769]: E0131 16:39:50.038804 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"image-registry-66df7c8f76-8fb6h_openshift-image-registry(c5b5821e-100f-4010-b609-d47860550119)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"image-registry-66df7c8f76-8fb6h_openshift-image-registry(c5b5821e-100f-4010-b609-d47860550119)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_image-registry-66df7c8f76-8fb6h_openshift-image-registry_c5b5821e-100f-4010-b609-d47860550119_0(b608fa09620713d8ddfc766636e77d2863a6779c4393e310ad1308762161efa2): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-image-registry/image-registry-66df7c8f76-8fb6h" podUID="c5b5821e-100f-4010-b609-d47860550119" Jan 31 16:39:50 crc kubenswrapper[4769]: I0131 16:39:50.708189 4769 scope.go:117] "RemoveContainer" containerID="4d02b7b52d4a04cc3175863e1021a77566a5ea07c3c2035a027d8f00b49ec612" Jan 31 16:39:50 crc kubenswrapper[4769]: E0131 16:39:50.708447 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-g5kbw_openshift-multus(4a7cfe09-9892-494d-a420-5d720afb3df3)\"" pod="openshift-multus/multus-g5kbw" podUID="4a7cfe09-9892-494d-a420-5d720afb3df3" Jan 31 16:40:00 crc kubenswrapper[4769]: I0131 16:40:00.233506 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9"] Jan 31 16:40:00 crc kubenswrapper[4769]: I0131 16:40:00.234904 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9" Jan 31 16:40:00 crc kubenswrapper[4769]: I0131 16:40:00.238211 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 31 16:40:00 crc kubenswrapper[4769]: I0131 16:40:00.250773 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9"] Jan 31 16:40:00 crc kubenswrapper[4769]: I0131 16:40:00.312311 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/fd84e8ff-8554-4ee0-a41a-35f5146d7873-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9\" (UID: \"fd84e8ff-8554-4ee0-a41a-35f5146d7873\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9" Jan 31 16:40:00 crc kubenswrapper[4769]: I0131 16:40:00.312358 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/fd84e8ff-8554-4ee0-a41a-35f5146d7873-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9\" (UID: \"fd84e8ff-8554-4ee0-a41a-35f5146d7873\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9" Jan 31 16:40:00 crc kubenswrapper[4769]: I0131 16:40:00.312452 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z6prk\" (UniqueName: \"kubernetes.io/projected/fd84e8ff-8554-4ee0-a41a-35f5146d7873-kube-api-access-z6prk\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9\" (UID: \"fd84e8ff-8554-4ee0-a41a-35f5146d7873\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9" Jan 31 16:40:00 crc kubenswrapper[4769]: I0131 16:40:00.414053 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/fd84e8ff-8554-4ee0-a41a-35f5146d7873-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9\" (UID: \"fd84e8ff-8554-4ee0-a41a-35f5146d7873\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9" Jan 31 16:40:00 crc kubenswrapper[4769]: I0131 16:40:00.414125 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/fd84e8ff-8554-4ee0-a41a-35f5146d7873-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9\" (UID: \"fd84e8ff-8554-4ee0-a41a-35f5146d7873\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9" Jan 31 16:40:00 crc kubenswrapper[4769]: I0131 16:40:00.414245 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z6prk\" (UniqueName: \"kubernetes.io/projected/fd84e8ff-8554-4ee0-a41a-35f5146d7873-kube-api-access-z6prk\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9\" (UID: \"fd84e8ff-8554-4ee0-a41a-35f5146d7873\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9" Jan 31 16:40:00 crc kubenswrapper[4769]: I0131 16:40:00.414799 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/fd84e8ff-8554-4ee0-a41a-35f5146d7873-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9\" (UID: \"fd84e8ff-8554-4ee0-a41a-35f5146d7873\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9" Jan 31 16:40:00 crc kubenswrapper[4769]: I0131 16:40:00.415026 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/fd84e8ff-8554-4ee0-a41a-35f5146d7873-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9\" (UID: \"fd84e8ff-8554-4ee0-a41a-35f5146d7873\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9" Jan 31 16:40:00 crc kubenswrapper[4769]: I0131 16:40:00.435892 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z6prk\" (UniqueName: \"kubernetes.io/projected/fd84e8ff-8554-4ee0-a41a-35f5146d7873-kube-api-access-z6prk\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9\" (UID: \"fd84e8ff-8554-4ee0-a41a-35f5146d7873\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9" Jan 31 16:40:00 crc kubenswrapper[4769]: I0131 16:40:00.551794 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9" Jan 31 16:40:00 crc kubenswrapper[4769]: E0131 16:40:00.592758 4769 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9_openshift-marketplace_fd84e8ff-8554-4ee0-a41a-35f5146d7873_0(34404da330d9f82a970a9f81e9777d339f7e55adabcdbabda6aa4f6590eff3a1): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 31 16:40:00 crc kubenswrapper[4769]: E0131 16:40:00.592888 4769 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9_openshift-marketplace_fd84e8ff-8554-4ee0-a41a-35f5146d7873_0(34404da330d9f82a970a9f81e9777d339f7e55adabcdbabda6aa4f6590eff3a1): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9" Jan 31 16:40:00 crc kubenswrapper[4769]: E0131 16:40:00.592927 4769 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9_openshift-marketplace_fd84e8ff-8554-4ee0-a41a-35f5146d7873_0(34404da330d9f82a970a9f81e9777d339f7e55adabcdbabda6aa4f6590eff3a1): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9" Jan 31 16:40:00 crc kubenswrapper[4769]: E0131 16:40:00.593013 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9_openshift-marketplace(fd84e8ff-8554-4ee0-a41a-35f5146d7873)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9_openshift-marketplace(fd84e8ff-8554-4ee0-a41a-35f5146d7873)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9_openshift-marketplace_fd84e8ff-8554-4ee0-a41a-35f5146d7873_0(34404da330d9f82a970a9f81e9777d339f7e55adabcdbabda6aa4f6590eff3a1): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9" podUID="fd84e8ff-8554-4ee0-a41a-35f5146d7873" Jan 31 16:40:00 crc kubenswrapper[4769]: I0131 16:40:00.708008 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-8fb6h" Jan 31 16:40:00 crc kubenswrapper[4769]: I0131 16:40:00.708943 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-8fb6h" Jan 31 16:40:00 crc kubenswrapper[4769]: E0131 16:40:00.739665 4769 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_image-registry-66df7c8f76-8fb6h_openshift-image-registry_c5b5821e-100f-4010-b609-d47860550119_0(6398d6ffed4559166b76960a9d64dcbf34e13d6c55fd9bdb004edbff11b2a1f2): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 31 16:40:00 crc kubenswrapper[4769]: E0131 16:40:00.739768 4769 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_image-registry-66df7c8f76-8fb6h_openshift-image-registry_c5b5821e-100f-4010-b609-d47860550119_0(6398d6ffed4559166b76960a9d64dcbf34e13d6c55fd9bdb004edbff11b2a1f2): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-image-registry/image-registry-66df7c8f76-8fb6h" Jan 31 16:40:00 crc kubenswrapper[4769]: E0131 16:40:00.739805 4769 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_image-registry-66df7c8f76-8fb6h_openshift-image-registry_c5b5821e-100f-4010-b609-d47860550119_0(6398d6ffed4559166b76960a9d64dcbf34e13d6c55fd9bdb004edbff11b2a1f2): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-image-registry/image-registry-66df7c8f76-8fb6h" Jan 31 16:40:00 crc kubenswrapper[4769]: E0131 16:40:00.739885 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"image-registry-66df7c8f76-8fb6h_openshift-image-registry(c5b5821e-100f-4010-b609-d47860550119)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"image-registry-66df7c8f76-8fb6h_openshift-image-registry(c5b5821e-100f-4010-b609-d47860550119)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_image-registry-66df7c8f76-8fb6h_openshift-image-registry_c5b5821e-100f-4010-b609-d47860550119_0(6398d6ffed4559166b76960a9d64dcbf34e13d6c55fd9bdb004edbff11b2a1f2): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-image-registry/image-registry-66df7c8f76-8fb6h" podUID="c5b5821e-100f-4010-b609-d47860550119" Jan 31 16:40:01 crc kubenswrapper[4769]: I0131 16:40:01.068654 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9" Jan 31 16:40:01 crc kubenswrapper[4769]: I0131 16:40:01.069030 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9" Jan 31 16:40:01 crc kubenswrapper[4769]: E0131 16:40:01.100982 4769 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9_openshift-marketplace_fd84e8ff-8554-4ee0-a41a-35f5146d7873_0(9751063d9cfbd9d3424b65eb39f5ad29e7cd194db01e280fb7076ffaa74f1a73): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 31 16:40:01 crc kubenswrapper[4769]: E0131 16:40:01.101042 4769 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9_openshift-marketplace_fd84e8ff-8554-4ee0-a41a-35f5146d7873_0(9751063d9cfbd9d3424b65eb39f5ad29e7cd194db01e280fb7076ffaa74f1a73): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9" Jan 31 16:40:01 crc kubenswrapper[4769]: E0131 16:40:01.101072 4769 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9_openshift-marketplace_fd84e8ff-8554-4ee0-a41a-35f5146d7873_0(9751063d9cfbd9d3424b65eb39f5ad29e7cd194db01e280fb7076ffaa74f1a73): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9" Jan 31 16:40:01 crc kubenswrapper[4769]: E0131 16:40:01.101124 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9_openshift-marketplace(fd84e8ff-8554-4ee0-a41a-35f5146d7873)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9_openshift-marketplace(fd84e8ff-8554-4ee0-a41a-35f5146d7873)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9_openshift-marketplace_fd84e8ff-8554-4ee0-a41a-35f5146d7873_0(9751063d9cfbd9d3424b65eb39f5ad29e7cd194db01e280fb7076ffaa74f1a73): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9" podUID="fd84e8ff-8554-4ee0-a41a-35f5146d7873" Jan 31 16:40:03 crc kubenswrapper[4769]: I0131 16:40:03.708385 4769 scope.go:117] "RemoveContainer" containerID="4d02b7b52d4a04cc3175863e1021a77566a5ea07c3c2035a027d8f00b49ec612" Jan 31 16:40:04 crc kubenswrapper[4769]: I0131 16:40:04.095288 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-g5kbw_4a7cfe09-9892-494d-a420-5d720afb3df3/kube-multus/2.log" Jan 31 16:40:04 crc kubenswrapper[4769]: I0131 16:40:04.096027 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-g5kbw_4a7cfe09-9892-494d-a420-5d720afb3df3/kube-multus/1.log" Jan 31 16:40:04 crc kubenswrapper[4769]: I0131 16:40:04.096085 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-g5kbw" event={"ID":"4a7cfe09-9892-494d-a420-5d720afb3df3","Type":"ContainerStarted","Data":"a99b02a0548811fe346edf9f051c84276ab2f971a20bd59719aa68cb454844f7"} Jan 31 16:40:06 crc kubenswrapper[4769]: I0131 16:40:06.269742 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-g7kwz" Jan 31 16:40:12 crc kubenswrapper[4769]: I0131 16:40:12.940445 4769 scope.go:117] "RemoveContainer" containerID="eb4ceb742b812d9b282ba14b266b4a78550b5dc38d7637c07d1c95256799bc40" Jan 31 16:40:13 crc kubenswrapper[4769]: I0131 16:40:13.163245 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-g5kbw_4a7cfe09-9892-494d-a420-5d720afb3df3/kube-multus/2.log" Jan 31 16:40:13 crc kubenswrapper[4769]: I0131 16:40:13.707954 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-8fb6h" Jan 31 16:40:13 crc kubenswrapper[4769]: I0131 16:40:13.708668 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-8fb6h" Jan 31 16:40:13 crc kubenswrapper[4769]: I0131 16:40:13.990704 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-8fb6h"] Jan 31 16:40:14 crc kubenswrapper[4769]: W0131 16:40:14.003258 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc5b5821e_100f_4010_b609_d47860550119.slice/crio-1f84959e4510f6b78d23575b57698fc9ad431c6e288ec80f46010c19307d3c91 WatchSource:0}: Error finding container 1f84959e4510f6b78d23575b57698fc9ad431c6e288ec80f46010c19307d3c91: Status 404 returned error can't find the container with id 1f84959e4510f6b78d23575b57698fc9ad431c6e288ec80f46010c19307d3c91 Jan 31 16:40:14 crc kubenswrapper[4769]: I0131 16:40:14.172356 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-8fb6h" event={"ID":"c5b5821e-100f-4010-b609-d47860550119","Type":"ContainerStarted","Data":"1f84959e4510f6b78d23575b57698fc9ad431c6e288ec80f46010c19307d3c91"} Jan 31 16:40:15 crc kubenswrapper[4769]: I0131 16:40:15.181321 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-8fb6h" event={"ID":"c5b5821e-100f-4010-b609-d47860550119","Type":"ContainerStarted","Data":"398be091f0c42d731d2de16391a914b192b76e93485b7a08bc0fd70841708c0a"} Jan 31 16:40:15 crc kubenswrapper[4769]: I0131 16:40:15.181791 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-8fb6h" Jan 31 16:40:15 crc kubenswrapper[4769]: I0131 16:40:15.212739 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-8fb6h" podStartSLOduration=27.212624755 podStartE2EDuration="27.212624755s" podCreationTimestamp="2026-01-31 16:39:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:40:15.206640632 +0000 UTC m=+663.280809321" watchObservedRunningTime="2026-01-31 16:40:15.212624755 +0000 UTC m=+663.286793464" Jan 31 16:40:15 crc kubenswrapper[4769]: I0131 16:40:15.707934 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9" Jan 31 16:40:15 crc kubenswrapper[4769]: I0131 16:40:15.708489 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9" Jan 31 16:40:16 crc kubenswrapper[4769]: I0131 16:40:16.128951 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9"] Jan 31 16:40:16 crc kubenswrapper[4769]: W0131 16:40:16.142309 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfd84e8ff_8554_4ee0_a41a_35f5146d7873.slice/crio-5e57be357a241ad4df9d8a9c5f59083c17cd3a1704be1252abcd48471c9479fd WatchSource:0}: Error finding container 5e57be357a241ad4df9d8a9c5f59083c17cd3a1704be1252abcd48471c9479fd: Status 404 returned error can't find the container with id 5e57be357a241ad4df9d8a9c5f59083c17cd3a1704be1252abcd48471c9479fd Jan 31 16:40:16 crc kubenswrapper[4769]: I0131 16:40:16.189388 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9" event={"ID":"fd84e8ff-8554-4ee0-a41a-35f5146d7873","Type":"ContainerStarted","Data":"5e57be357a241ad4df9d8a9c5f59083c17cd3a1704be1252abcd48471c9479fd"} Jan 31 16:40:17 crc kubenswrapper[4769]: I0131 16:40:17.196168 4769 generic.go:334] "Generic (PLEG): container finished" podID="fd84e8ff-8554-4ee0-a41a-35f5146d7873" containerID="435b84023478ab3d52fc44649ae097a73216f4b7e93c1d8cf4016cee325ec2f4" exitCode=0 Jan 31 16:40:17 crc kubenswrapper[4769]: I0131 16:40:17.196250 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9" event={"ID":"fd84e8ff-8554-4ee0-a41a-35f5146d7873","Type":"ContainerDied","Data":"435b84023478ab3d52fc44649ae097a73216f4b7e93c1d8cf4016cee325ec2f4"} Jan 31 16:40:17 crc kubenswrapper[4769]: I0131 16:40:17.198387 4769 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 31 16:40:19 crc kubenswrapper[4769]: I0131 16:40:19.211709 4769 generic.go:334] "Generic (PLEG): container finished" podID="fd84e8ff-8554-4ee0-a41a-35f5146d7873" containerID="5842fdbbab32b36ec57fd2aa33d4ea84feae6e04d0e1f280efd0abc500f54095" exitCode=0 Jan 31 16:40:19 crc kubenswrapper[4769]: I0131 16:40:19.211845 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9" event={"ID":"fd84e8ff-8554-4ee0-a41a-35f5146d7873","Type":"ContainerDied","Data":"5842fdbbab32b36ec57fd2aa33d4ea84feae6e04d0e1f280efd0abc500f54095"} Jan 31 16:40:20 crc kubenswrapper[4769]: I0131 16:40:20.224202 4769 generic.go:334] "Generic (PLEG): container finished" podID="fd84e8ff-8554-4ee0-a41a-35f5146d7873" containerID="7ef7efd37cb08dd5f66723c064fbe50ea9a9772250d63effe4ced15de8d6f1ad" exitCode=0 Jan 31 16:40:20 crc kubenswrapper[4769]: I0131 16:40:20.224263 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9" event={"ID":"fd84e8ff-8554-4ee0-a41a-35f5146d7873","Type":"ContainerDied","Data":"7ef7efd37cb08dd5f66723c064fbe50ea9a9772250d63effe4ced15de8d6f1ad"} Jan 31 16:40:21 crc kubenswrapper[4769]: I0131 16:40:21.558113 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9" Jan 31 16:40:21 crc kubenswrapper[4769]: I0131 16:40:21.651569 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/fd84e8ff-8554-4ee0-a41a-35f5146d7873-util\") pod \"fd84e8ff-8554-4ee0-a41a-35f5146d7873\" (UID: \"fd84e8ff-8554-4ee0-a41a-35f5146d7873\") " Jan 31 16:40:21 crc kubenswrapper[4769]: I0131 16:40:21.651699 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/fd84e8ff-8554-4ee0-a41a-35f5146d7873-bundle\") pod \"fd84e8ff-8554-4ee0-a41a-35f5146d7873\" (UID: \"fd84e8ff-8554-4ee0-a41a-35f5146d7873\") " Jan 31 16:40:21 crc kubenswrapper[4769]: I0131 16:40:21.651760 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z6prk\" (UniqueName: \"kubernetes.io/projected/fd84e8ff-8554-4ee0-a41a-35f5146d7873-kube-api-access-z6prk\") pod \"fd84e8ff-8554-4ee0-a41a-35f5146d7873\" (UID: \"fd84e8ff-8554-4ee0-a41a-35f5146d7873\") " Jan 31 16:40:21 crc kubenswrapper[4769]: I0131 16:40:21.654036 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fd84e8ff-8554-4ee0-a41a-35f5146d7873-bundle" (OuterVolumeSpecName: "bundle") pod "fd84e8ff-8554-4ee0-a41a-35f5146d7873" (UID: "fd84e8ff-8554-4ee0-a41a-35f5146d7873"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:40:21 crc kubenswrapper[4769]: I0131 16:40:21.660751 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd84e8ff-8554-4ee0-a41a-35f5146d7873-kube-api-access-z6prk" (OuterVolumeSpecName: "kube-api-access-z6prk") pod "fd84e8ff-8554-4ee0-a41a-35f5146d7873" (UID: "fd84e8ff-8554-4ee0-a41a-35f5146d7873"). InnerVolumeSpecName "kube-api-access-z6prk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:40:21 crc kubenswrapper[4769]: I0131 16:40:21.753256 4769 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/fd84e8ff-8554-4ee0-a41a-35f5146d7873-bundle\") on node \"crc\" DevicePath \"\"" Jan 31 16:40:21 crc kubenswrapper[4769]: I0131 16:40:21.753327 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z6prk\" (UniqueName: \"kubernetes.io/projected/fd84e8ff-8554-4ee0-a41a-35f5146d7873-kube-api-access-z6prk\") on node \"crc\" DevicePath \"\"" Jan 31 16:40:21 crc kubenswrapper[4769]: I0131 16:40:21.878656 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fd84e8ff-8554-4ee0-a41a-35f5146d7873-util" (OuterVolumeSpecName: "util") pod "fd84e8ff-8554-4ee0-a41a-35f5146d7873" (UID: "fd84e8ff-8554-4ee0-a41a-35f5146d7873"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:40:21 crc kubenswrapper[4769]: I0131 16:40:21.955574 4769 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/fd84e8ff-8554-4ee0-a41a-35f5146d7873-util\") on node \"crc\" DevicePath \"\"" Jan 31 16:40:22 crc kubenswrapper[4769]: I0131 16:40:22.239819 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9" event={"ID":"fd84e8ff-8554-4ee0-a41a-35f5146d7873","Type":"ContainerDied","Data":"5e57be357a241ad4df9d8a9c5f59083c17cd3a1704be1252abcd48471c9479fd"} Jan 31 16:40:22 crc kubenswrapper[4769]: I0131 16:40:22.239879 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5e57be357a241ad4df9d8a9c5f59083c17cd3a1704be1252abcd48471c9479fd" Jan 31 16:40:22 crc kubenswrapper[4769]: I0131 16:40:22.239888 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9" Jan 31 16:40:29 crc kubenswrapper[4769]: I0131 16:40:29.282537 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-8fb6h" Jan 31 16:40:29 crc kubenswrapper[4769]: I0131 16:40:29.332535 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-vdcnf"] Jan 31 16:40:34 crc kubenswrapper[4769]: I0131 16:40:34.706303 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-7b5894f8dd-87v5w"] Jan 31 16:40:34 crc kubenswrapper[4769]: E0131 16:40:34.706601 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd84e8ff-8554-4ee0-a41a-35f5146d7873" containerName="pull" Jan 31 16:40:34 crc kubenswrapper[4769]: I0131 16:40:34.706616 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd84e8ff-8554-4ee0-a41a-35f5146d7873" containerName="pull" Jan 31 16:40:34 crc kubenswrapper[4769]: E0131 16:40:34.706635 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd84e8ff-8554-4ee0-a41a-35f5146d7873" containerName="extract" Jan 31 16:40:34 crc kubenswrapper[4769]: I0131 16:40:34.706643 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd84e8ff-8554-4ee0-a41a-35f5146d7873" containerName="extract" Jan 31 16:40:34 crc kubenswrapper[4769]: E0131 16:40:34.706658 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd84e8ff-8554-4ee0-a41a-35f5146d7873" containerName="util" Jan 31 16:40:34 crc kubenswrapper[4769]: I0131 16:40:34.706666 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd84e8ff-8554-4ee0-a41a-35f5146d7873" containerName="util" Jan 31 16:40:34 crc kubenswrapper[4769]: I0131 16:40:34.706783 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="fd84e8ff-8554-4ee0-a41a-35f5146d7873" containerName="extract" Jan 31 16:40:34 crc kubenswrapper[4769]: I0131 16:40:34.707209 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-7b5894f8dd-87v5w" Jan 31 16:40:34 crc kubenswrapper[4769]: I0131 16:40:34.719394 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Jan 31 16:40:34 crc kubenswrapper[4769]: I0131 16:40:34.719549 4769 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Jan 31 16:40:34 crc kubenswrapper[4769]: I0131 16:40:34.719608 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Jan 31 16:40:34 crc kubenswrapper[4769]: I0131 16:40:34.721170 4769 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-c6qzn" Jan 31 16:40:34 crc kubenswrapper[4769]: I0131 16:40:34.722236 4769 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Jan 31 16:40:34 crc kubenswrapper[4769]: I0131 16:40:34.732864 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-7b5894f8dd-87v5w"] Jan 31 16:40:34 crc kubenswrapper[4769]: I0131 16:40:34.734958 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/fe37bf4b-0671-44be-b390-3ba344ae8d71-webhook-cert\") pod \"metallb-operator-controller-manager-7b5894f8dd-87v5w\" (UID: \"fe37bf4b-0671-44be-b390-3ba344ae8d71\") " pod="metallb-system/metallb-operator-controller-manager-7b5894f8dd-87v5w" Jan 31 16:40:34 crc kubenswrapper[4769]: I0131 16:40:34.735011 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/fe37bf4b-0671-44be-b390-3ba344ae8d71-apiservice-cert\") pod \"metallb-operator-controller-manager-7b5894f8dd-87v5w\" (UID: \"fe37bf4b-0671-44be-b390-3ba344ae8d71\") " pod="metallb-system/metallb-operator-controller-manager-7b5894f8dd-87v5w" Jan 31 16:40:34 crc kubenswrapper[4769]: I0131 16:40:34.735050 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lkv2c\" (UniqueName: \"kubernetes.io/projected/fe37bf4b-0671-44be-b390-3ba344ae8d71-kube-api-access-lkv2c\") pod \"metallb-operator-controller-manager-7b5894f8dd-87v5w\" (UID: \"fe37bf4b-0671-44be-b390-3ba344ae8d71\") " pod="metallb-system/metallb-operator-controller-manager-7b5894f8dd-87v5w" Jan 31 16:40:34 crc kubenswrapper[4769]: I0131 16:40:34.836203 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/fe37bf4b-0671-44be-b390-3ba344ae8d71-webhook-cert\") pod \"metallb-operator-controller-manager-7b5894f8dd-87v5w\" (UID: \"fe37bf4b-0671-44be-b390-3ba344ae8d71\") " pod="metallb-system/metallb-operator-controller-manager-7b5894f8dd-87v5w" Jan 31 16:40:34 crc kubenswrapper[4769]: I0131 16:40:34.836445 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/fe37bf4b-0671-44be-b390-3ba344ae8d71-apiservice-cert\") pod \"metallb-operator-controller-manager-7b5894f8dd-87v5w\" (UID: \"fe37bf4b-0671-44be-b390-3ba344ae8d71\") " pod="metallb-system/metallb-operator-controller-manager-7b5894f8dd-87v5w" Jan 31 16:40:34 crc kubenswrapper[4769]: I0131 16:40:34.836609 4769 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lkv2c\" (UniqueName: \"kubernetes.io/projected/fe37bf4b-0671-44be-b390-3ba344ae8d71-kube-api-access-lkv2c\") pod \"metallb-operator-controller-manager-7b5894f8dd-87v5w\" (UID: \"fe37bf4b-0671-44be-b390-3ba344ae8d71\") " pod="metallb-system/metallb-operator-controller-manager-7b5894f8dd-87v5w" Jan 31 16:40:34 crc kubenswrapper[4769]: I0131 16:40:34.842241 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/fe37bf4b-0671-44be-b390-3ba344ae8d71-webhook-cert\") pod \"metallb-operator-controller-manager-7b5894f8dd-87v5w\" (UID: \"fe37bf4b-0671-44be-b390-3ba344ae8d71\") " pod="metallb-system/metallb-operator-controller-manager-7b5894f8dd-87v5w" Jan 31 16:40:34 crc kubenswrapper[4769]: I0131 16:40:34.847545 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/fe37bf4b-0671-44be-b390-3ba344ae8d71-apiservice-cert\") pod \"metallb-operator-controller-manager-7b5894f8dd-87v5w\" (UID: \"fe37bf4b-0671-44be-b390-3ba344ae8d71\") " pod="metallb-system/metallb-operator-controller-manager-7b5894f8dd-87v5w" Jan 31 16:40:34 crc kubenswrapper[4769]: I0131 16:40:34.856590 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lkv2c\" (UniqueName: \"kubernetes.io/projected/fe37bf4b-0671-44be-b390-3ba344ae8d71-kube-api-access-lkv2c\") pod \"metallb-operator-controller-manager-7b5894f8dd-87v5w\" (UID: \"fe37bf4b-0671-44be-b390-3ba344ae8d71\") " pod="metallb-system/metallb-operator-controller-manager-7b5894f8dd-87v5w" Jan 31 16:40:34 crc kubenswrapper[4769]: I0131 16:40:34.937339 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-b797d4985-n2hhh"] Jan 31 16:40:34 crc kubenswrapper[4769]: I0131 16:40:34.937993 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-b797d4985-n2hhh" Jan 31 16:40:34 crc kubenswrapper[4769]: I0131 16:40:34.943326 4769 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-phsvr" Jan 31 16:40:34 crc kubenswrapper[4769]: I0131 16:40:34.943411 4769 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Jan 31 16:40:34 crc kubenswrapper[4769]: I0131 16:40:34.945295 4769 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Jan 31 16:40:34 crc kubenswrapper[4769]: I0131 16:40:34.986648 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-b797d4985-n2hhh"] Jan 31 16:40:35 crc kubenswrapper[4769]: I0131 16:40:35.025729 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-7b5894f8dd-87v5w" Jan 31 16:40:35 crc kubenswrapper[4769]: I0131 16:40:35.139134 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q22vk\" (UniqueName: \"kubernetes.io/projected/b465a277-1e9b-4534-93b1-aa164b30d12f-kube-api-access-q22vk\") pod \"metallb-operator-webhook-server-b797d4985-n2hhh\" (UID: \"b465a277-1e9b-4534-93b1-aa164b30d12f\") " pod="metallb-system/metallb-operator-webhook-server-b797d4985-n2hhh" Jan 31 16:40:35 crc kubenswrapper[4769]: I0131 16:40:35.139179 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/b465a277-1e9b-4534-93b1-aa164b30d12f-apiservice-cert\") pod \"metallb-operator-webhook-server-b797d4985-n2hhh\" (UID: \"b465a277-1e9b-4534-93b1-aa164b30d12f\") " pod="metallb-system/metallb-operator-webhook-server-b797d4985-n2hhh" Jan 31 16:40:35 crc kubenswrapper[4769]: I0131 16:40:35.139205 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/b465a277-1e9b-4534-93b1-aa164b30d12f-webhook-cert\") pod \"metallb-operator-webhook-server-b797d4985-n2hhh\" (UID: \"b465a277-1e9b-4534-93b1-aa164b30d12f\") " pod="metallb-system/metallb-operator-webhook-server-b797d4985-n2hhh" Jan 31 16:40:35 crc kubenswrapper[4769]: I0131 16:40:35.240445 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q22vk\" (UniqueName: \"kubernetes.io/projected/b465a277-1e9b-4534-93b1-aa164b30d12f-kube-api-access-q22vk\") pod \"metallb-operator-webhook-server-b797d4985-n2hhh\" (UID: \"b465a277-1e9b-4534-93b1-aa164b30d12f\") " pod="metallb-system/metallb-operator-webhook-server-b797d4985-n2hhh" Jan 31 16:40:35 crc kubenswrapper[4769]: I0131 16:40:35.240485 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/b465a277-1e9b-4534-93b1-aa164b30d12f-apiservice-cert\") pod \"metallb-operator-webhook-server-b797d4985-n2hhh\" (UID: \"b465a277-1e9b-4534-93b1-aa164b30d12f\") " pod="metallb-system/metallb-operator-webhook-server-b797d4985-n2hhh" Jan 31 16:40:35 crc kubenswrapper[4769]: I0131 16:40:35.240520 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/b465a277-1e9b-4534-93b1-aa164b30d12f-webhook-cert\") pod \"metallb-operator-webhook-server-b797d4985-n2hhh\" (UID: \"b465a277-1e9b-4534-93b1-aa164b30d12f\") " pod="metallb-system/metallb-operator-webhook-server-b797d4985-n2hhh" Jan 31 16:40:35 crc kubenswrapper[4769]: I0131 16:40:35.245106 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/b465a277-1e9b-4534-93b1-aa164b30d12f-webhook-cert\") pod \"metallb-operator-webhook-server-b797d4985-n2hhh\" (UID: \"b465a277-1e9b-4534-93b1-aa164b30d12f\") " pod="metallb-system/metallb-operator-webhook-server-b797d4985-n2hhh" Jan 31 16:40:35 crc kubenswrapper[4769]: I0131 16:40:35.259525 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q22vk\" (UniqueName: \"kubernetes.io/projected/b465a277-1e9b-4534-93b1-aa164b30d12f-kube-api-access-q22vk\") pod \"metallb-operator-webhook-server-b797d4985-n2hhh\" (UID: \"b465a277-1e9b-4534-93b1-aa164b30d12f\") " 
pod="metallb-system/metallb-operator-webhook-server-b797d4985-n2hhh" Jan 31 16:40:35 crc kubenswrapper[4769]: I0131 16:40:35.262424 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/b465a277-1e9b-4534-93b1-aa164b30d12f-apiservice-cert\") pod \"metallb-operator-webhook-server-b797d4985-n2hhh\" (UID: \"b465a277-1e9b-4534-93b1-aa164b30d12f\") " pod="metallb-system/metallb-operator-webhook-server-b797d4985-n2hhh" Jan 31 16:40:35 crc kubenswrapper[4769]: I0131 16:40:35.281404 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-7b5894f8dd-87v5w"] Jan 31 16:40:35 crc kubenswrapper[4769]: W0131 16:40:35.289290 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe37bf4b_0671_44be_b390_3ba344ae8d71.slice/crio-afc6bf0b51a915e699b08a72e741209d178f7095fc9e50c6a75beec1ed34e57c WatchSource:0}: Error finding container afc6bf0b51a915e699b08a72e741209d178f7095fc9e50c6a75beec1ed34e57c: Status 404 returned error can't find the container with id afc6bf0b51a915e699b08a72e741209d178f7095fc9e50c6a75beec1ed34e57c Jan 31 16:40:35 crc kubenswrapper[4769]: I0131 16:40:35.312438 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-7b5894f8dd-87v5w" event={"ID":"fe37bf4b-0671-44be-b390-3ba344ae8d71","Type":"ContainerStarted","Data":"afc6bf0b51a915e699b08a72e741209d178f7095fc9e50c6a75beec1ed34e57c"} Jan 31 16:40:35 crc kubenswrapper[4769]: I0131 16:40:35.551242 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-b797d4985-n2hhh" Jan 31 16:40:35 crc kubenswrapper[4769]: I0131 16:40:35.790542 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-b797d4985-n2hhh"] Jan 31 16:40:35 crc kubenswrapper[4769]: W0131 16:40:35.791316 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb465a277_1e9b_4534_93b1_aa164b30d12f.slice/crio-4d0633d701e8695b2b92971cb580a723fd443e2cf65b792e46944c5d39606bab WatchSource:0}: Error finding container 4d0633d701e8695b2b92971cb580a723fd443e2cf65b792e46944c5d39606bab: Status 404 returned error can't find the container with id 4d0633d701e8695b2b92971cb580a723fd443e2cf65b792e46944c5d39606bab Jan 31 16:40:36 crc kubenswrapper[4769]: I0131 16:40:36.323673 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-b797d4985-n2hhh" event={"ID":"b465a277-1e9b-4534-93b1-aa164b30d12f","Type":"ContainerStarted","Data":"4d0633d701e8695b2b92971cb580a723fd443e2cf65b792e46944c5d39606bab"} Jan 31 16:40:39 crc kubenswrapper[4769]: I0131 16:40:39.361315 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-7b5894f8dd-87v5w" event={"ID":"fe37bf4b-0671-44be-b390-3ba344ae8d71","Type":"ContainerStarted","Data":"e4b7f819e1a15a5021bdd5a3e31d70da6ea4a8ea19c0d0014451b441e507e140"} Jan 31 16:40:39 crc kubenswrapper[4769]: I0131 16:40:39.361623 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-7b5894f8dd-87v5w" Jan 31 16:40:39 crc kubenswrapper[4769]: I0131 16:40:39.381723 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="metallb-system/metallb-operator-controller-manager-7b5894f8dd-87v5w" podStartSLOduration=2.515732493 podStartE2EDuration="5.381706588s" podCreationTimestamp="2026-01-31 16:40:34 +0000 UTC" firstStartedPulling="2026-01-31 16:40:35.291051491 +0000 UTC m=+683.365220160" lastFinishedPulling="2026-01-31 16:40:38.157025586 +0000 UTC m=+686.231194255" observedRunningTime="2026-01-31 16:40:39.38065198 +0000 UTC m=+687.454820659" watchObservedRunningTime="2026-01-31 16:40:39.381706588 +0000 UTC m=+687.455875267" Jan 31 16:40:40 crc kubenswrapper[4769]: I0131 16:40:40.368660 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-b797d4985-n2hhh" event={"ID":"b465a277-1e9b-4534-93b1-aa164b30d12f","Type":"ContainerStarted","Data":"bf25684e3ccc8adc1cdbaefd0842befa1964e7546347cfc7b9c701154d71675f"} Jan 31 16:40:40 crc kubenswrapper[4769]: I0131 16:40:40.368827 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-b797d4985-n2hhh" Jan 31 16:40:40 crc kubenswrapper[4769]: I0131 16:40:40.399445 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-b797d4985-n2hhh" podStartSLOduration=2.266814934 podStartE2EDuration="6.399418456s" podCreationTimestamp="2026-01-31 16:40:34 +0000 UTC" firstStartedPulling="2026-01-31 16:40:35.795616818 +0000 UTC m=+683.869785487" lastFinishedPulling="2026-01-31 16:40:39.92822033 +0000 UTC m=+688.002389009" observedRunningTime="2026-01-31 16:40:40.392454248 +0000 UTC m=+688.466622957" watchObservedRunningTime="2026-01-31 16:40:40.399418456 +0000 UTC m=+688.473587135" Jan 31 16:40:54 crc kubenswrapper[4769]: I0131 16:40:54.374367 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" podUID="4494904a-b7f5-4141-8a63-3360e03bc528" containerName="registry" containerID="cri-o://642b2e5039e3689f75c97a49c50027ac9c8193290a92ae15c6ca068eb171384f" gracePeriod=30 Jan 31 16:40:54 crc kubenswrapper[4769]: I0131 16:40:54.723618 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:40:54 crc kubenswrapper[4769]: I0131 16:40:54.808148 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4494904a-b7f5-4141-8a63-3360e03bc528-trusted-ca\") pod \"4494904a-b7f5-4141-8a63-3360e03bc528\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " Jan 31 16:40:54 crc kubenswrapper[4769]: I0131 16:40:54.808184 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/4494904a-b7f5-4141-8a63-3360e03bc528-registry-tls\") pod \"4494904a-b7f5-4141-8a63-3360e03bc528\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " Jan 31 16:40:54 crc kubenswrapper[4769]: I0131 16:40:54.808235 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/4494904a-b7f5-4141-8a63-3360e03bc528-ca-trust-extracted\") pod \"4494904a-b7f5-4141-8a63-3360e03bc528\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " Jan 31 16:40:54 crc kubenswrapper[4769]: I0131 16:40:54.808420 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"4494904a-b7f5-4141-8a63-3360e03bc528\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " Jan 31 16:40:54 crc kubenswrapper[4769]: I0131 16:40:54.808439 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/4494904a-b7f5-4141-8a63-3360e03bc528-bound-sa-token\") pod \"4494904a-b7f5-4141-8a63-3360e03bc528\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " Jan 31 16:40:54 crc kubenswrapper[4769]: I0131 16:40:54.808457 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-77rgj\" (UniqueName: \"kubernetes.io/projected/4494904a-b7f5-4141-8a63-3360e03bc528-kube-api-access-77rgj\") pod \"4494904a-b7f5-4141-8a63-3360e03bc528\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " Jan 31 16:40:54 crc kubenswrapper[4769]: I0131 16:40:54.808707 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/4494904a-b7f5-4141-8a63-3360e03bc528-registry-certificates\") pod \"4494904a-b7f5-4141-8a63-3360e03bc528\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " Jan 31 16:40:54 crc kubenswrapper[4769]: I0131 16:40:54.808803 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/4494904a-b7f5-4141-8a63-3360e03bc528-installation-pull-secrets\") pod \"4494904a-b7f5-4141-8a63-3360e03bc528\" (UID: \"4494904a-b7f5-4141-8a63-3360e03bc528\") " Jan 31 16:40:54 crc kubenswrapper[4769]: I0131 16:40:54.810307 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4494904a-b7f5-4141-8a63-3360e03bc528-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "4494904a-b7f5-4141-8a63-3360e03bc528" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:40:54 crc kubenswrapper[4769]: I0131 16:40:54.811031 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4494904a-b7f5-4141-8a63-3360e03bc528-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "4494904a-b7f5-4141-8a63-3360e03bc528" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:40:54 crc kubenswrapper[4769]: I0131 16:40:54.817102 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4494904a-b7f5-4141-8a63-3360e03bc528-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "4494904a-b7f5-4141-8a63-3360e03bc528" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:40:54 crc kubenswrapper[4769]: I0131 16:40:54.817299 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4494904a-b7f5-4141-8a63-3360e03bc528-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "4494904a-b7f5-4141-8a63-3360e03bc528" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:40:54 crc kubenswrapper[4769]: I0131 16:40:54.818686 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4494904a-b7f5-4141-8a63-3360e03bc528-kube-api-access-77rgj" (OuterVolumeSpecName: "kube-api-access-77rgj") pod "4494904a-b7f5-4141-8a63-3360e03bc528" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528"). InnerVolumeSpecName "kube-api-access-77rgj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:40:54 crc kubenswrapper[4769]: I0131 16:40:54.821201 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4494904a-b7f5-4141-8a63-3360e03bc528-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "4494904a-b7f5-4141-8a63-3360e03bc528" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:40:54 crc kubenswrapper[4769]: I0131 16:40:54.824863 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4494904a-b7f5-4141-8a63-3360e03bc528-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "4494904a-b7f5-4141-8a63-3360e03bc528" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:40:54 crc kubenswrapper[4769]: I0131 16:40:54.827572 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "4494904a-b7f5-4141-8a63-3360e03bc528" (UID: "4494904a-b7f5-4141-8a63-3360e03bc528"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 31 16:40:54 crc kubenswrapper[4769]: I0131 16:40:54.910531 4769 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/4494904a-b7f5-4141-8a63-3360e03bc528-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Jan 31 16:40:54 crc kubenswrapper[4769]: I0131 16:40:54.910856 4769 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/4494904a-b7f5-4141-8a63-3360e03bc528-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 31 16:40:54 crc kubenswrapper[4769]: I0131 16:40:54.910866 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-77rgj\" (UniqueName: \"kubernetes.io/projected/4494904a-b7f5-4141-8a63-3360e03bc528-kube-api-access-77rgj\") on node \"crc\" DevicePath \"\"" Jan 31 16:40:54 crc kubenswrapper[4769]: I0131 16:40:54.910879 4769 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/4494904a-b7f5-4141-8a63-3360e03bc528-registry-certificates\") on node \"crc\" DevicePath \"\"" Jan 31 16:40:54 crc kubenswrapper[4769]: I0131 16:40:54.910888 4769 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/4494904a-b7f5-4141-8a63-3360e03bc528-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Jan 31 16:40:54 crc kubenswrapper[4769]: I0131 16:40:54.910897 4769 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4494904a-b7f5-4141-8a63-3360e03bc528-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 31 16:40:54 crc kubenswrapper[4769]: I0131 16:40:54.910905 4769 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/4494904a-b7f5-4141-8a63-3360e03bc528-registry-tls\") on node \"crc\" DevicePath \"\"" Jan 31 16:40:55 crc kubenswrapper[4769]: I0131 16:40:55.484284 4769 generic.go:334] "Generic (PLEG): container finished" podID="4494904a-b7f5-4141-8a63-3360e03bc528" containerID="642b2e5039e3689f75c97a49c50027ac9c8193290a92ae15c6ca068eb171384f" exitCode=0 Jan 31 16:40:55 crc kubenswrapper[4769]: I0131 16:40:55.484321 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" Jan 31 16:40:55 crc kubenswrapper[4769]: I0131 16:40:55.484334 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" event={"ID":"4494904a-b7f5-4141-8a63-3360e03bc528","Type":"ContainerDied","Data":"642b2e5039e3689f75c97a49c50027ac9c8193290a92ae15c6ca068eb171384f"} Jan 31 16:40:55 crc kubenswrapper[4769]: I0131 16:40:55.486108 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-vdcnf" event={"ID":"4494904a-b7f5-4141-8a63-3360e03bc528","Type":"ContainerDied","Data":"4a6e46ae6cd8c1772a959e2053add752cf78bcaad102acfcb8b9e8384eba523e"} Jan 31 16:40:55 crc kubenswrapper[4769]: I0131 16:40:55.486170 4769 scope.go:117] "RemoveContainer" containerID="642b2e5039e3689f75c97a49c50027ac9c8193290a92ae15c6ca068eb171384f" Jan 31 16:40:55 crc kubenswrapper[4769]: I0131 16:40:55.506976 4769 scope.go:117] "RemoveContainer" containerID="642b2e5039e3689f75c97a49c50027ac9c8193290a92ae15c6ca068eb171384f" Jan 31 16:40:55 crc kubenswrapper[4769]: E0131 16:40:55.507631 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"642b2e5039e3689f75c97a49c50027ac9c8193290a92ae15c6ca068eb171384f\": container with ID starting with 642b2e5039e3689f75c97a49c50027ac9c8193290a92ae15c6ca068eb171384f not found: ID does not exist" containerID="642b2e5039e3689f75c97a49c50027ac9c8193290a92ae15c6ca068eb171384f" Jan 31 16:40:55 crc kubenswrapper[4769]: I0131 16:40:55.507668 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"642b2e5039e3689f75c97a49c50027ac9c8193290a92ae15c6ca068eb171384f"} err="failed to get container status \"642b2e5039e3689f75c97a49c50027ac9c8193290a92ae15c6ca068eb171384f\": rpc error: code = NotFound desc = could not find container \"642b2e5039e3689f75c97a49c50027ac9c8193290a92ae15c6ca068eb171384f\": container with ID starting with 642b2e5039e3689f75c97a49c50027ac9c8193290a92ae15c6ca068eb171384f not found: ID does not exist" Jan 31 16:40:55 crc kubenswrapper[4769]: I0131 16:40:55.516174 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-vdcnf"] Jan 31 16:40:55 crc kubenswrapper[4769]: I0131 16:40:55.520439 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-vdcnf"] Jan 31 16:40:55 crc kubenswrapper[4769]: I0131 16:40:55.558461 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-b797d4985-n2hhh" Jan 31 16:40:56 crc kubenswrapper[4769]: I0131 16:40:56.723151 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4494904a-b7f5-4141-8a63-3360e03bc528" path="/var/lib/kubelet/pods/4494904a-b7f5-4141-8a63-3360e03bc528/volumes" Jan 31 16:41:15 crc kubenswrapper[4769]: I0131 16:41:15.033721 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-7b5894f8dd-87v5w" Jan 31 16:41:15 crc kubenswrapper[4769]: I0131 16:41:15.801681 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-t6zmk"] Jan 31 16:41:15 crc kubenswrapper[4769]: E0131 16:41:15.801950 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4494904a-b7f5-4141-8a63-3360e03bc528" containerName="registry" Jan 31 
16:41:15 crc kubenswrapper[4769]: I0131 16:41:15.801965 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="4494904a-b7f5-4141-8a63-3360e03bc528" containerName="registry" Jan 31 16:41:15 crc kubenswrapper[4769]: I0131 16:41:15.802076 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="4494904a-b7f5-4141-8a63-3360e03bc528" containerName="registry" Jan 31 16:41:15 crc kubenswrapper[4769]: I0131 16:41:15.804343 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-t6zmk" Jan 31 16:41:15 crc kubenswrapper[4769]: I0131 16:41:15.823127 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-2jzj5"] Jan 31 16:41:15 crc kubenswrapper[4769]: I0131 16:41:15.824792 4769 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Jan 31 16:41:15 crc kubenswrapper[4769]: I0131 16:41:15.824993 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Jan 31 16:41:15 crc kubenswrapper[4769]: I0131 16:41:15.825471 4769 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-25tk9" Jan 31 16:41:15 crc kubenswrapper[4769]: I0131 16:41:15.827641 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-2jzj5" Jan 31 16:41:15 crc kubenswrapper[4769]: I0131 16:41:15.829255 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-2jzj5"] Jan 31 16:41:15 crc kubenswrapper[4769]: I0131 16:41:15.829673 4769 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Jan 31 16:41:15 crc kubenswrapper[4769]: I0131 16:41:15.899183 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-txm8b"] Jan 31 16:41:15 crc kubenswrapper[4769]: I0131 16:41:15.900279 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-txm8b" Jan 31 16:41:15 crc kubenswrapper[4769]: I0131 16:41:15.902409 4769 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Jan 31 16:41:15 crc kubenswrapper[4769]: I0131 16:41:15.902948 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Jan 31 16:41:15 crc kubenswrapper[4769]: I0131 16:41:15.903255 4769 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-4wqck" Jan 31 16:41:15 crc kubenswrapper[4769]: I0131 16:41:15.904434 4769 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Jan 31 16:41:15 crc kubenswrapper[4769]: I0131 16:41:15.912981 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/42156100-4827-4e7a-9cce-ec90993228af-metrics-certs\") pod \"frr-k8s-t6zmk\" (UID: \"42156100-4827-4e7a-9cce-ec90993228af\") " pod="metallb-system/frr-k8s-t6zmk" Jan 31 16:41:15 crc kubenswrapper[4769]: I0131 16:41:15.913236 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/42156100-4827-4e7a-9cce-ec90993228af-frr-startup\") pod \"frr-k8s-t6zmk\" (UID: \"42156100-4827-4e7a-9cce-ec90993228af\") " pod="metallb-system/frr-k8s-t6zmk" Jan 31 16:41:15 crc kubenswrapper[4769]: I0131 16:41:15.913396 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/093fd348-a77b-4736-97a9-3fe20a0a63f9-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-2jzj5\" (UID: \"093fd348-a77b-4736-97a9-3fe20a0a63f9\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-2jzj5" Jan 31 16:41:15 crc kubenswrapper[4769]: I0131 16:41:15.913574 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/42156100-4827-4e7a-9cce-ec90993228af-metrics\") pod \"frr-k8s-t6zmk\" (UID: \"42156100-4827-4e7a-9cce-ec90993228af\") " pod="metallb-system/frr-k8s-t6zmk" Jan 31 16:41:15 crc kubenswrapper[4769]: I0131 16:41:15.913731 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/42156100-4827-4e7a-9cce-ec90993228af-reloader\") pod \"frr-k8s-t6zmk\" (UID: \"42156100-4827-4e7a-9cce-ec90993228af\") " pod="metallb-system/frr-k8s-t6zmk" Jan 31 16:41:15 crc kubenswrapper[4769]: I0131 16:41:15.913879 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w9k42\" (UniqueName: \"kubernetes.io/projected/42156100-4827-4e7a-9cce-ec90993228af-kube-api-access-w9k42\") pod \"frr-k8s-t6zmk\" (UID: \"42156100-4827-4e7a-9cce-ec90993228af\") " pod="metallb-system/frr-k8s-t6zmk" Jan 31 16:41:15 crc kubenswrapper[4769]: I0131 16:41:15.914054 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/42156100-4827-4e7a-9cce-ec90993228af-frr-conf\") pod \"frr-k8s-t6zmk\" (UID: \"42156100-4827-4e7a-9cce-ec90993228af\") " pod="metallb-system/frr-k8s-t6zmk" Jan 31 16:41:15 crc kubenswrapper[4769]: I0131 16:41:15.914206 4769 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tjbmf\" (UniqueName: \"kubernetes.io/projected/093fd348-a77b-4736-97a9-3fe20a0a63f9-kube-api-access-tjbmf\") pod \"frr-k8s-webhook-server-7df86c4f6c-2jzj5\" (UID: \"093fd348-a77b-4736-97a9-3fe20a0a63f9\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-2jzj5" Jan 31 16:41:15 crc kubenswrapper[4769]: I0131 16:41:15.914363 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/42156100-4827-4e7a-9cce-ec90993228af-frr-sockets\") pod \"frr-k8s-t6zmk\" (UID: \"42156100-4827-4e7a-9cce-ec90993228af\") " pod="metallb-system/frr-k8s-t6zmk" Jan 31 16:41:15 crc kubenswrapper[4769]: I0131 16:41:15.914625 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6968d8fdc4-tjsrv"] Jan 31 16:41:15 crc kubenswrapper[4769]: I0131 16:41:15.915404 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6968d8fdc4-tjsrv" Jan 31 16:41:15 crc kubenswrapper[4769]: I0131 16:41:15.917229 4769 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Jan 31 16:41:15 crc kubenswrapper[4769]: I0131 16:41:15.938543 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6968d8fdc4-tjsrv"] Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.016231 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/42156100-4827-4e7a-9cce-ec90993228af-metrics-certs\") pod \"frr-k8s-t6zmk\" (UID: \"42156100-4827-4e7a-9cce-ec90993228af\") " pod="metallb-system/frr-k8s-t6zmk" Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.016288 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7d9baf9c-e190-482c-85c3-adff9ee82cd7-cert\") pod \"controller-6968d8fdc4-tjsrv\" (UID: \"7d9baf9c-e190-482c-85c3-adff9ee82cd7\") " pod="metallb-system/controller-6968d8fdc4-tjsrv" Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.016317 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/42156100-4827-4e7a-9cce-ec90993228af-frr-startup\") pod \"frr-k8s-t6zmk\" (UID: \"42156100-4827-4e7a-9cce-ec90993228af\") " pod="metallb-system/frr-k8s-t6zmk" Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.016336 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/093fd348-a77b-4736-97a9-3fe20a0a63f9-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-2jzj5\" (UID: \"093fd348-a77b-4736-97a9-3fe20a0a63f9\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-2jzj5" Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.016352 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/42156100-4827-4e7a-9cce-ec90993228af-metrics\") pod \"frr-k8s-t6zmk\" (UID: \"42156100-4827-4e7a-9cce-ec90993228af\") " pod="metallb-system/frr-k8s-t6zmk" Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.016369 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/42156100-4827-4e7a-9cce-ec90993228af-reloader\") pod \"frr-k8s-t6zmk\" (UID: 
\"42156100-4827-4e7a-9cce-ec90993228af\") " pod="metallb-system/frr-k8s-t6zmk" Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.016385 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w9k42\" (UniqueName: \"kubernetes.io/projected/42156100-4827-4e7a-9cce-ec90993228af-kube-api-access-w9k42\") pod \"frr-k8s-t6zmk\" (UID: \"42156100-4827-4e7a-9cce-ec90993228af\") " pod="metallb-system/frr-k8s-t6zmk" Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.016404 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/1c91ccf4-70f7-4203-be06-eeaa0bd439ba-metallb-excludel2\") pod \"speaker-txm8b\" (UID: \"1c91ccf4-70f7-4203-be06-eeaa0bd439ba\") " pod="metallb-system/speaker-txm8b" Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.016427 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7d9baf9c-e190-482c-85c3-adff9ee82cd7-metrics-certs\") pod \"controller-6968d8fdc4-tjsrv\" (UID: \"7d9baf9c-e190-482c-85c3-adff9ee82cd7\") " pod="metallb-system/controller-6968d8fdc4-tjsrv" Jan 31 16:41:16 crc kubenswrapper[4769]: E0131 16:41:16.016439 4769 secret.go:188] Couldn't get secret metallb-system/frr-k8s-certs-secret: secret "frr-k8s-certs-secret" not found Jan 31 16:41:16 crc kubenswrapper[4769]: E0131 16:41:16.016538 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/42156100-4827-4e7a-9cce-ec90993228af-metrics-certs podName:42156100-4827-4e7a-9cce-ec90993228af nodeName:}" failed. No retries permitted until 2026-01-31 16:41:16.51651685 +0000 UTC m=+724.590685519 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/42156100-4827-4e7a-9cce-ec90993228af-metrics-certs") pod "frr-k8s-t6zmk" (UID: "42156100-4827-4e7a-9cce-ec90993228af") : secret "frr-k8s-certs-secret" not found Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.016446 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5b2d7\" (UniqueName: \"kubernetes.io/projected/7d9baf9c-e190-482c-85c3-adff9ee82cd7-kube-api-access-5b2d7\") pod \"controller-6968d8fdc4-tjsrv\" (UID: \"7d9baf9c-e190-482c-85c3-adff9ee82cd7\") " pod="metallb-system/controller-6968d8fdc4-tjsrv" Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.016793 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wsbch\" (UniqueName: \"kubernetes.io/projected/1c91ccf4-70f7-4203-be06-eeaa0bd439ba-kube-api-access-wsbch\") pod \"speaker-txm8b\" (UID: \"1c91ccf4-70f7-4203-be06-eeaa0bd439ba\") " pod="metallb-system/speaker-txm8b" Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.016851 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/42156100-4827-4e7a-9cce-ec90993228af-frr-conf\") pod \"frr-k8s-t6zmk\" (UID: \"42156100-4827-4e7a-9cce-ec90993228af\") " pod="metallb-system/frr-k8s-t6zmk" Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.016910 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tjbmf\" (UniqueName: \"kubernetes.io/projected/093fd348-a77b-4736-97a9-3fe20a0a63f9-kube-api-access-tjbmf\") pod \"frr-k8s-webhook-server-7df86c4f6c-2jzj5\" (UID: \"093fd348-a77b-4736-97a9-3fe20a0a63f9\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-2jzj5" Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.016948 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/1c91ccf4-70f7-4203-be06-eeaa0bd439ba-memberlist\") pod \"speaker-txm8b\" (UID: \"1c91ccf4-70f7-4203-be06-eeaa0bd439ba\") " pod="metallb-system/speaker-txm8b" Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.016972 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/42156100-4827-4e7a-9cce-ec90993228af-frr-sockets\") pod \"frr-k8s-t6zmk\" (UID: \"42156100-4827-4e7a-9cce-ec90993228af\") " pod="metallb-system/frr-k8s-t6zmk" Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.016991 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1c91ccf4-70f7-4203-be06-eeaa0bd439ba-metrics-certs\") pod \"speaker-txm8b\" (UID: \"1c91ccf4-70f7-4203-be06-eeaa0bd439ba\") " pod="metallb-system/speaker-txm8b" Jan 31 16:41:16 crc kubenswrapper[4769]: E0131 16:41:16.017200 4769 secret.go:188] Couldn't get secret metallb-system/frr-k8s-webhook-server-cert: secret "frr-k8s-webhook-server-cert" not found Jan 31 16:41:16 crc kubenswrapper[4769]: E0131 16:41:16.017323 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/093fd348-a77b-4736-97a9-3fe20a0a63f9-cert podName:093fd348-a77b-4736-97a9-3fe20a0a63f9 nodeName:}" failed. No retries permitted until 2026-01-31 16:41:16.51729055 +0000 UTC m=+724.591459219 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/093fd348-a77b-4736-97a9-3fe20a0a63f9-cert") pod "frr-k8s-webhook-server-7df86c4f6c-2jzj5" (UID: "093fd348-a77b-4736-97a9-3fe20a0a63f9") : secret "frr-k8s-webhook-server-cert" not found Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.017323 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/42156100-4827-4e7a-9cce-ec90993228af-frr-startup\") pod \"frr-k8s-t6zmk\" (UID: \"42156100-4827-4e7a-9cce-ec90993228af\") " pod="metallb-system/frr-k8s-t6zmk" Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.017594 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/42156100-4827-4e7a-9cce-ec90993228af-frr-conf\") pod \"frr-k8s-t6zmk\" (UID: \"42156100-4827-4e7a-9cce-ec90993228af\") " pod="metallb-system/frr-k8s-t6zmk" Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.017589 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/42156100-4827-4e7a-9cce-ec90993228af-reloader\") pod \"frr-k8s-t6zmk\" (UID: \"42156100-4827-4e7a-9cce-ec90993228af\") " pod="metallb-system/frr-k8s-t6zmk" Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.017947 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/42156100-4827-4e7a-9cce-ec90993228af-frr-sockets\") pod \"frr-k8s-t6zmk\" (UID: \"42156100-4827-4e7a-9cce-ec90993228af\") " pod="metallb-system/frr-k8s-t6zmk" Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.017962 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/42156100-4827-4e7a-9cce-ec90993228af-metrics\") pod \"frr-k8s-t6zmk\" (UID: \"42156100-4827-4e7a-9cce-ec90993228af\") " pod="metallb-system/frr-k8s-t6zmk" Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.041848 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tjbmf\" (UniqueName: \"kubernetes.io/projected/093fd348-a77b-4736-97a9-3fe20a0a63f9-kube-api-access-tjbmf\") pod \"frr-k8s-webhook-server-7df86c4f6c-2jzj5\" (UID: \"093fd348-a77b-4736-97a9-3fe20a0a63f9\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-2jzj5" Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.042034 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w9k42\" (UniqueName: \"kubernetes.io/projected/42156100-4827-4e7a-9cce-ec90993228af-kube-api-access-w9k42\") pod \"frr-k8s-t6zmk\" (UID: \"42156100-4827-4e7a-9cce-ec90993228af\") " pod="metallb-system/frr-k8s-t6zmk" Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.118582 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7d9baf9c-e190-482c-85c3-adff9ee82cd7-cert\") pod \"controller-6968d8fdc4-tjsrv\" (UID: \"7d9baf9c-e190-482c-85c3-adff9ee82cd7\") " pod="metallb-system/controller-6968d8fdc4-tjsrv" Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.118648 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/1c91ccf4-70f7-4203-be06-eeaa0bd439ba-metallb-excludel2\") pod \"speaker-txm8b\" (UID: \"1c91ccf4-70f7-4203-be06-eeaa0bd439ba\") " pod="metallb-system/speaker-txm8b" Jan 31 16:41:16 crc 
kubenswrapper[4769]: I0131 16:41:16.118674 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7d9baf9c-e190-482c-85c3-adff9ee82cd7-metrics-certs\") pod \"controller-6968d8fdc4-tjsrv\" (UID: \"7d9baf9c-e190-482c-85c3-adff9ee82cd7\") " pod="metallb-system/controller-6968d8fdc4-tjsrv" Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.118696 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5b2d7\" (UniqueName: \"kubernetes.io/projected/7d9baf9c-e190-482c-85c3-adff9ee82cd7-kube-api-access-5b2d7\") pod \"controller-6968d8fdc4-tjsrv\" (UID: \"7d9baf9c-e190-482c-85c3-adff9ee82cd7\") " pod="metallb-system/controller-6968d8fdc4-tjsrv" Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.118710 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wsbch\" (UniqueName: \"kubernetes.io/projected/1c91ccf4-70f7-4203-be06-eeaa0bd439ba-kube-api-access-wsbch\") pod \"speaker-txm8b\" (UID: \"1c91ccf4-70f7-4203-be06-eeaa0bd439ba\") " pod="metallb-system/speaker-txm8b" Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.118737 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/1c91ccf4-70f7-4203-be06-eeaa0bd439ba-memberlist\") pod \"speaker-txm8b\" (UID: \"1c91ccf4-70f7-4203-be06-eeaa0bd439ba\") " pod="metallb-system/speaker-txm8b" Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.118755 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1c91ccf4-70f7-4203-be06-eeaa0bd439ba-metrics-certs\") pod \"speaker-txm8b\" (UID: \"1c91ccf4-70f7-4203-be06-eeaa0bd439ba\") " pod="metallb-system/speaker-txm8b" Jan 31 16:41:16 crc kubenswrapper[4769]: E0131 16:41:16.118867 4769 secret.go:188] Couldn't get secret metallb-system/speaker-certs-secret: secret "speaker-certs-secret" not found Jan 31 16:41:16 crc kubenswrapper[4769]: E0131 16:41:16.118915 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1c91ccf4-70f7-4203-be06-eeaa0bd439ba-metrics-certs podName:1c91ccf4-70f7-4203-be06-eeaa0bd439ba nodeName:}" failed. No retries permitted until 2026-01-31 16:41:16.61890037 +0000 UTC m=+724.693069039 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/1c91ccf4-70f7-4203-be06-eeaa0bd439ba-metrics-certs") pod "speaker-txm8b" (UID: "1c91ccf4-70f7-4203-be06-eeaa0bd439ba") : secret "speaker-certs-secret" not found Jan 31 16:41:16 crc kubenswrapper[4769]: E0131 16:41:16.119209 4769 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Jan 31 16:41:16 crc kubenswrapper[4769]: E0131 16:41:16.119255 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1c91ccf4-70f7-4203-be06-eeaa0bd439ba-memberlist podName:1c91ccf4-70f7-4203-be06-eeaa0bd439ba nodeName:}" failed. No retries permitted until 2026-01-31 16:41:16.619247829 +0000 UTC m=+724.693416498 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/1c91ccf4-70f7-4203-be06-eeaa0bd439ba-memberlist") pod "speaker-txm8b" (UID: "1c91ccf4-70f7-4203-be06-eeaa0bd439ba") : secret "metallb-memberlist" not found Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.119941 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/1c91ccf4-70f7-4203-be06-eeaa0bd439ba-metallb-excludel2\") pod \"speaker-txm8b\" (UID: \"1c91ccf4-70f7-4203-be06-eeaa0bd439ba\") " pod="metallb-system/speaker-txm8b" Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.120570 4769 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.123174 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7d9baf9c-e190-482c-85c3-adff9ee82cd7-metrics-certs\") pod \"controller-6968d8fdc4-tjsrv\" (UID: \"7d9baf9c-e190-482c-85c3-adff9ee82cd7\") " pod="metallb-system/controller-6968d8fdc4-tjsrv" Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.133940 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7d9baf9c-e190-482c-85c3-adff9ee82cd7-cert\") pod \"controller-6968d8fdc4-tjsrv\" (UID: \"7d9baf9c-e190-482c-85c3-adff9ee82cd7\") " pod="metallb-system/controller-6968d8fdc4-tjsrv" Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.134728 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5b2d7\" (UniqueName: \"kubernetes.io/projected/7d9baf9c-e190-482c-85c3-adff9ee82cd7-kube-api-access-5b2d7\") pod \"controller-6968d8fdc4-tjsrv\" (UID: \"7d9baf9c-e190-482c-85c3-adff9ee82cd7\") " pod="metallb-system/controller-6968d8fdc4-tjsrv" Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.139715 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wsbch\" (UniqueName: \"kubernetes.io/projected/1c91ccf4-70f7-4203-be06-eeaa0bd439ba-kube-api-access-wsbch\") pod \"speaker-txm8b\" (UID: \"1c91ccf4-70f7-4203-be06-eeaa0bd439ba\") " pod="metallb-system/speaker-txm8b" Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.227607 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6968d8fdc4-tjsrv" Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.429911 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6968d8fdc4-tjsrv"] Jan 31 16:41:16 crc kubenswrapper[4769]: W0131 16:41:16.439712 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7d9baf9c_e190_482c_85c3_adff9ee82cd7.slice/crio-73f3fecf5e6a6d6dd36de113b2d348ca77d9c440f09a1dade0bb90b290b8cb84 WatchSource:0}: Error finding container 73f3fecf5e6a6d6dd36de113b2d348ca77d9c440f09a1dade0bb90b290b8cb84: Status 404 returned error can't find the container with id 73f3fecf5e6a6d6dd36de113b2d348ca77d9c440f09a1dade0bb90b290b8cb84 Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.527889 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/42156100-4827-4e7a-9cce-ec90993228af-metrics-certs\") pod \"frr-k8s-t6zmk\" (UID: \"42156100-4827-4e7a-9cce-ec90993228af\") " pod="metallb-system/frr-k8s-t6zmk" Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.528435 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/093fd348-a77b-4736-97a9-3fe20a0a63f9-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-2jzj5\" (UID: \"093fd348-a77b-4736-97a9-3fe20a0a63f9\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-2jzj5" Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.532864 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/42156100-4827-4e7a-9cce-ec90993228af-metrics-certs\") pod \"frr-k8s-t6zmk\" (UID: \"42156100-4827-4e7a-9cce-ec90993228af\") " pod="metallb-system/frr-k8s-t6zmk" Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.533528 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/093fd348-a77b-4736-97a9-3fe20a0a63f9-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-2jzj5\" (UID: \"093fd348-a77b-4736-97a9-3fe20a0a63f9\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-2jzj5" Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.622817 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-tjsrv" event={"ID":"7d9baf9c-e190-482c-85c3-adff9ee82cd7","Type":"ContainerStarted","Data":"d2573541a9f729981cc58015d5596e50b47b29380f27325b04ca9fb0ca955e3f"} Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.622890 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-tjsrv" event={"ID":"7d9baf9c-e190-482c-85c3-adff9ee82cd7","Type":"ContainerStarted","Data":"73f3fecf5e6a6d6dd36de113b2d348ca77d9c440f09a1dade0bb90b290b8cb84"} Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.630386 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/1c91ccf4-70f7-4203-be06-eeaa0bd439ba-memberlist\") pod \"speaker-txm8b\" (UID: \"1c91ccf4-70f7-4203-be06-eeaa0bd439ba\") " pod="metallb-system/speaker-txm8b" Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.630439 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1c91ccf4-70f7-4203-be06-eeaa0bd439ba-metrics-certs\") pod \"speaker-txm8b\" (UID: 
\"1c91ccf4-70f7-4203-be06-eeaa0bd439ba\") " pod="metallb-system/speaker-txm8b" Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.634287 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1c91ccf4-70f7-4203-be06-eeaa0bd439ba-metrics-certs\") pod \"speaker-txm8b\" (UID: \"1c91ccf4-70f7-4203-be06-eeaa0bd439ba\") " pod="metallb-system/speaker-txm8b" Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.634877 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/1c91ccf4-70f7-4203-be06-eeaa0bd439ba-memberlist\") pod \"speaker-txm8b\" (UID: \"1c91ccf4-70f7-4203-be06-eeaa0bd439ba\") " pod="metallb-system/speaker-txm8b" Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.724447 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-t6zmk" Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.743162 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-2jzj5" Jan 31 16:41:16 crc kubenswrapper[4769]: I0131 16:41:16.813336 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-txm8b" Jan 31 16:41:16 crc kubenswrapper[4769]: W0131 16:41:16.845639 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1c91ccf4_70f7_4203_be06_eeaa0bd439ba.slice/crio-ef0a30142f91c6fe3710321b493d23d691428cb13a49c118ec7cbf9a93cc3486 WatchSource:0}: Error finding container ef0a30142f91c6fe3710321b493d23d691428cb13a49c118ec7cbf9a93cc3486: Status 404 returned error can't find the container with id ef0a30142f91c6fe3710321b493d23d691428cb13a49c118ec7cbf9a93cc3486 Jan 31 16:41:17 crc kubenswrapper[4769]: I0131 16:41:17.048093 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-2jzj5"] Jan 31 16:41:17 crc kubenswrapper[4769]: I0131 16:41:17.631905 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-2jzj5" event={"ID":"093fd348-a77b-4736-97a9-3fe20a0a63f9","Type":"ContainerStarted","Data":"bb8d673ed749a201363d29a6ac770e6f8fab35122df09e76d6c14364fb4ffb08"} Jan 31 16:41:17 crc kubenswrapper[4769]: I0131 16:41:17.633293 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-t6zmk" event={"ID":"42156100-4827-4e7a-9cce-ec90993228af","Type":"ContainerStarted","Data":"9c61d925997dd1ed44c6fd6004e2ceb5575b27a2c0106363685591c5849cad77"} Jan 31 16:41:17 crc kubenswrapper[4769]: I0131 16:41:17.636880 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-txm8b" event={"ID":"1c91ccf4-70f7-4203-be06-eeaa0bd439ba","Type":"ContainerStarted","Data":"739e58c3f2ec2e188d3aae0eb73e16958d56283ecf5fc7e930203dfa1afd750d"} Jan 31 16:41:17 crc kubenswrapper[4769]: I0131 16:41:17.636948 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-txm8b" event={"ID":"1c91ccf4-70f7-4203-be06-eeaa0bd439ba","Type":"ContainerStarted","Data":"ef0a30142f91c6fe3710321b493d23d691428cb13a49c118ec7cbf9a93cc3486"} Jan 31 16:41:20 crc kubenswrapper[4769]: I0131 16:41:20.660710 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-tjsrv" 
event={"ID":"7d9baf9c-e190-482c-85c3-adff9ee82cd7","Type":"ContainerStarted","Data":"a47ed268e92a49abda902efc75f3b17a1e272b7fe1cb198d9deb6ca18334580c"} Jan 31 16:41:20 crc kubenswrapper[4769]: I0131 16:41:20.662687 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6968d8fdc4-tjsrv" Jan 31 16:41:20 crc kubenswrapper[4769]: I0131 16:41:20.666295 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-txm8b" event={"ID":"1c91ccf4-70f7-4203-be06-eeaa0bd439ba","Type":"ContainerStarted","Data":"828751953b889d016e8a351ab14781416f0c4691effa9b1de488a897ff736a69"} Jan 31 16:41:20 crc kubenswrapper[4769]: I0131 16:41:20.667032 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-txm8b" Jan 31 16:41:20 crc kubenswrapper[4769]: I0131 16:41:20.681728 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6968d8fdc4-tjsrv" podStartSLOduration=2.5371020619999998 podStartE2EDuration="5.681710052s" podCreationTimestamp="2026-01-31 16:41:15 +0000 UTC" firstStartedPulling="2026-01-31 16:41:16.568154283 +0000 UTC m=+724.642322962" lastFinishedPulling="2026-01-31 16:41:19.712762283 +0000 UTC m=+727.786930952" observedRunningTime="2026-01-31 16:41:20.677521615 +0000 UTC m=+728.751690304" watchObservedRunningTime="2026-01-31 16:41:20.681710052 +0000 UTC m=+728.755878721" Jan 31 16:41:20 crc kubenswrapper[4769]: I0131 16:41:20.682769 4769 patch_prober.go:28] interesting pod/machine-config-daemon-4bqbm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 31 16:41:20 crc kubenswrapper[4769]: I0131 16:41:20.682833 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 31 16:41:20 crc kubenswrapper[4769]: I0131 16:41:20.715871 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-txm8b" podStartSLOduration=3.267301743 podStartE2EDuration="5.715852316s" podCreationTimestamp="2026-01-31 16:41:15 +0000 UTC" firstStartedPulling="2026-01-31 16:41:17.271145958 +0000 UTC m=+725.345314627" lastFinishedPulling="2026-01-31 16:41:19.719696531 +0000 UTC m=+727.793865200" observedRunningTime="2026-01-31 16:41:20.712360196 +0000 UTC m=+728.786528885" watchObservedRunningTime="2026-01-31 16:41:20.715852316 +0000 UTC m=+728.790020985" Jan 31 16:41:24 crc kubenswrapper[4769]: I0131 16:41:24.688518 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-2jzj5" event={"ID":"093fd348-a77b-4736-97a9-3fe20a0a63f9","Type":"ContainerStarted","Data":"0d2383421dd5d0c3178e4478b1c603126d4e21a11a85de83add7eab445c1bb5c"} Jan 31 16:41:24 crc kubenswrapper[4769]: I0131 16:41:24.690465 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-2jzj5" Jan 31 16:41:24 crc kubenswrapper[4769]: I0131 16:41:24.691153 4769 generic.go:334] "Generic (PLEG): container finished" podID="42156100-4827-4e7a-9cce-ec90993228af" 
containerID="5bc7d8b17e61a74d175580468e6ea0aab67d5b9bab6e9bda14665b5a26b15b0c" exitCode=0 Jan 31 16:41:24 crc kubenswrapper[4769]: I0131 16:41:24.691231 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-t6zmk" event={"ID":"42156100-4827-4e7a-9cce-ec90993228af","Type":"ContainerDied","Data":"5bc7d8b17e61a74d175580468e6ea0aab67d5b9bab6e9bda14665b5a26b15b0c"} Jan 31 16:41:24 crc kubenswrapper[4769]: I0131 16:41:24.735543 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-2jzj5" podStartSLOduration=3.226903621 podStartE2EDuration="9.735528884s" podCreationTimestamp="2026-01-31 16:41:15 +0000 UTC" firstStartedPulling="2026-01-31 16:41:17.0612974 +0000 UTC m=+725.135466069" lastFinishedPulling="2026-01-31 16:41:23.569922663 +0000 UTC m=+731.644091332" observedRunningTime="2026-01-31 16:41:24.725447505 +0000 UTC m=+732.799616214" watchObservedRunningTime="2026-01-31 16:41:24.735528884 +0000 UTC m=+732.809697553" Jan 31 16:41:25 crc kubenswrapper[4769]: I0131 16:41:25.700479 4769 generic.go:334] "Generic (PLEG): container finished" podID="42156100-4827-4e7a-9cce-ec90993228af" containerID="4d25a841ce542f27519d4ed552ec38c7e4613a4fa8ac7e1ddcdcd1de6ee707ed" exitCode=0 Jan 31 16:41:25 crc kubenswrapper[4769]: I0131 16:41:25.700651 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-t6zmk" event={"ID":"42156100-4827-4e7a-9cce-ec90993228af","Type":"ContainerDied","Data":"4d25a841ce542f27519d4ed552ec38c7e4613a4fa8ac7e1ddcdcd1de6ee707ed"} Jan 31 16:41:26 crc kubenswrapper[4769]: I0131 16:41:26.231473 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6968d8fdc4-tjsrv" Jan 31 16:41:26 crc kubenswrapper[4769]: I0131 16:41:26.709932 4769 generic.go:334] "Generic (PLEG): container finished" podID="42156100-4827-4e7a-9cce-ec90993228af" containerID="155609c9a1b8ffdc8615a831e1b296a3d0d0eea41763164d6b29b999208e25c7" exitCode=0 Jan 31 16:41:26 crc kubenswrapper[4769]: I0131 16:41:26.717708 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-t6zmk" event={"ID":"42156100-4827-4e7a-9cce-ec90993228af","Type":"ContainerDied","Data":"155609c9a1b8ffdc8615a831e1b296a3d0d0eea41763164d6b29b999208e25c7"} Jan 31 16:41:27 crc kubenswrapper[4769]: I0131 16:41:27.721777 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-t6zmk" event={"ID":"42156100-4827-4e7a-9cce-ec90993228af","Type":"ContainerStarted","Data":"6dd492c90b1f7ec0ef88fb1dd4115e10559b9b982fe7a2a8f6f5df55b19a9a04"} Jan 31 16:41:27 crc kubenswrapper[4769]: I0131 16:41:27.722127 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-t6zmk" event={"ID":"42156100-4827-4e7a-9cce-ec90993228af","Type":"ContainerStarted","Data":"4d62499ef72156589d328bd9803fb6031f27815804dd19f4a23a8e85bda2c0b6"} Jan 31 16:41:27 crc kubenswrapper[4769]: I0131 16:41:27.722143 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-t6zmk" event={"ID":"42156100-4827-4e7a-9cce-ec90993228af","Type":"ContainerStarted","Data":"89e9c77a211046bcb9acffda7c6d71c3768c4db4d6b07b2e66b29adeff34b742"} Jan 31 16:41:27 crc kubenswrapper[4769]: I0131 16:41:27.722155 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-t6zmk" event={"ID":"42156100-4827-4e7a-9cce-ec90993228af","Type":"ContainerStarted","Data":"a6026d6173cadb69d1fb6e3de36d879050d18b119d28fc6c49a2456d8921351b"} Jan 
31 16:41:27 crc kubenswrapper[4769]: I0131 16:41:27.722166 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-t6zmk" event={"ID":"42156100-4827-4e7a-9cce-ec90993228af","Type":"ContainerStarted","Data":"2ea18250bf41c57f154312efbbca2ef6de75f406e4fdad2738e54aedf57b190d"} Jan 31 16:41:28 crc kubenswrapper[4769]: I0131 16:41:28.737625 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-t6zmk" event={"ID":"42156100-4827-4e7a-9cce-ec90993228af","Type":"ContainerStarted","Data":"87d9fe6dd1758f1977834003aea5f367d797e50b87ed899051668bbb2392c7dd"} Jan 31 16:41:28 crc kubenswrapper[4769]: I0131 16:41:28.737870 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-t6zmk" Jan 31 16:41:28 crc kubenswrapper[4769]: I0131 16:41:28.776615 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-t6zmk" podStartSLOduration=7.110414434 podStartE2EDuration="13.776595829s" podCreationTimestamp="2026-01-31 16:41:15 +0000 UTC" firstStartedPulling="2026-01-31 16:41:16.942645564 +0000 UTC m=+725.016814243" lastFinishedPulling="2026-01-31 16:41:23.608826969 +0000 UTC m=+731.682995638" observedRunningTime="2026-01-31 16:41:28.769324683 +0000 UTC m=+736.843493392" watchObservedRunningTime="2026-01-31 16:41:28.776595829 +0000 UTC m=+736.850764518" Jan 31 16:41:31 crc kubenswrapper[4769]: I0131 16:41:31.725318 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-t6zmk" Jan 31 16:41:31 crc kubenswrapper[4769]: I0131 16:41:31.791597 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-t6zmk" Jan 31 16:41:36 crc kubenswrapper[4769]: I0131 16:41:36.739978 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-t6zmk" Jan 31 16:41:36 crc kubenswrapper[4769]: I0131 16:41:36.758469 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-2jzj5" Jan 31 16:41:36 crc kubenswrapper[4769]: I0131 16:41:36.822211 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-txm8b" Jan 31 16:41:42 crc kubenswrapper[4769]: I0131 16:41:42.407090 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-index-d82gv"] Jan 31 16:41:42 crc kubenswrapper[4769]: I0131 16:41:42.408298 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-index-d82gv" Jan 31 16:41:42 crc kubenswrapper[4769]: I0131 16:41:42.415318 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Jan 31 16:41:42 crc kubenswrapper[4769]: I0131 16:41:42.415888 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-index-dockercfg-xrf45" Jan 31 16:41:42 crc kubenswrapper[4769]: I0131 16:41:42.423528 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-index-d82gv"] Jan 31 16:41:42 crc kubenswrapper[4769]: I0131 16:41:42.423909 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Jan 31 16:41:42 crc kubenswrapper[4769]: I0131 16:41:42.597532 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gwhmj\" (UniqueName: \"kubernetes.io/projected/ae706cd3-99c0-43e0-b026-3c5345caf0bb-kube-api-access-gwhmj\") pod \"mariadb-operator-index-d82gv\" (UID: \"ae706cd3-99c0-43e0-b026-3c5345caf0bb\") " pod="openstack-operators/mariadb-operator-index-d82gv" Jan 31 16:41:42 crc kubenswrapper[4769]: I0131 16:41:42.698839 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gwhmj\" (UniqueName: \"kubernetes.io/projected/ae706cd3-99c0-43e0-b026-3c5345caf0bb-kube-api-access-gwhmj\") pod \"mariadb-operator-index-d82gv\" (UID: \"ae706cd3-99c0-43e0-b026-3c5345caf0bb\") " pod="openstack-operators/mariadb-operator-index-d82gv" Jan 31 16:41:42 crc kubenswrapper[4769]: I0131 16:41:42.717085 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gwhmj\" (UniqueName: \"kubernetes.io/projected/ae706cd3-99c0-43e0-b026-3c5345caf0bb-kube-api-access-gwhmj\") pod \"mariadb-operator-index-d82gv\" (UID: \"ae706cd3-99c0-43e0-b026-3c5345caf0bb\") " pod="openstack-operators/mariadb-operator-index-d82gv" Jan 31 16:41:42 crc kubenswrapper[4769]: I0131 16:41:42.763010 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-index-d82gv" Jan 31 16:41:43 crc kubenswrapper[4769]: I0131 16:41:43.181843 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-index-d82gv"] Jan 31 16:41:43 crc kubenswrapper[4769]: I0131 16:41:43.840486 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-index-d82gv" event={"ID":"ae706cd3-99c0-43e0-b026-3c5345caf0bb","Type":"ContainerStarted","Data":"33d6e0cfe65458dc3d9c095b10a6efaacac28831944d68862922a6af9d3c70ea"} Jan 31 16:41:44 crc kubenswrapper[4769]: I0131 16:41:44.848699 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-index-d82gv" event={"ID":"ae706cd3-99c0-43e0-b026-3c5345caf0bb","Type":"ContainerStarted","Data":"ac2cd6c3e28e032d299d52d731a9eb1a03827b47dc012ecb8514e063138308d5"} Jan 31 16:41:44 crc kubenswrapper[4769]: I0131 16:41:44.872557 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-index-d82gv" podStartSLOduration=2.034202842 podStartE2EDuration="2.872530409s" podCreationTimestamp="2026-01-31 16:41:42 +0000 UTC" firstStartedPulling="2026-01-31 16:41:43.198582034 +0000 UTC m=+751.272750703" lastFinishedPulling="2026-01-31 16:41:44.036909601 +0000 UTC m=+752.111078270" observedRunningTime="2026-01-31 16:41:44.864415221 +0000 UTC m=+752.938583920" watchObservedRunningTime="2026-01-31 16:41:44.872530409 +0000 UTC m=+752.946699108" Jan 31 16:41:44 crc kubenswrapper[4769]: I0131 16:41:44.909228 4769 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Jan 31 16:41:45 crc kubenswrapper[4769]: I0131 16:41:45.181701 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/mariadb-operator-index-d82gv"] Jan 31 16:41:45 crc kubenswrapper[4769]: I0131 16:41:45.788661 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-index-594rf"] Jan 31 16:41:45 crc kubenswrapper[4769]: I0131 16:41:45.790359 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-index-594rf" Jan 31 16:41:45 crc kubenswrapper[4769]: I0131 16:41:45.801424 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-index-594rf"] Jan 31 16:41:45 crc kubenswrapper[4769]: I0131 16:41:45.939041 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zddx7\" (UniqueName: \"kubernetes.io/projected/d0deba60-bef5-4108-ab00-ab378d8c9a3b-kube-api-access-zddx7\") pod \"mariadb-operator-index-594rf\" (UID: \"d0deba60-bef5-4108-ab00-ab378d8c9a3b\") " pod="openstack-operators/mariadb-operator-index-594rf" Jan 31 16:41:46 crc kubenswrapper[4769]: I0131 16:41:46.040404 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zddx7\" (UniqueName: \"kubernetes.io/projected/d0deba60-bef5-4108-ab00-ab378d8c9a3b-kube-api-access-zddx7\") pod \"mariadb-operator-index-594rf\" (UID: \"d0deba60-bef5-4108-ab00-ab378d8c9a3b\") " pod="openstack-operators/mariadb-operator-index-594rf" Jan 31 16:41:46 crc kubenswrapper[4769]: I0131 16:41:46.063420 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zddx7\" (UniqueName: \"kubernetes.io/projected/d0deba60-bef5-4108-ab00-ab378d8c9a3b-kube-api-access-zddx7\") pod \"mariadb-operator-index-594rf\" (UID: \"d0deba60-bef5-4108-ab00-ab378d8c9a3b\") " pod="openstack-operators/mariadb-operator-index-594rf" Jan 31 16:41:46 crc kubenswrapper[4769]: I0131 16:41:46.107861 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-index-594rf" Jan 31 16:41:46 crc kubenswrapper[4769]: I0131 16:41:46.602769 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-index-594rf"] Jan 31 16:41:46 crc kubenswrapper[4769]: W0131 16:41:46.604595 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd0deba60_bef5_4108_ab00_ab378d8c9a3b.slice/crio-dd6161631c79870edf3f1237ed6d3c6e72d384dd12f6db43907f7802df902719 WatchSource:0}: Error finding container dd6161631c79870edf3f1237ed6d3c6e72d384dd12f6db43907f7802df902719: Status 404 returned error can't find the container with id dd6161631c79870edf3f1237ed6d3c6e72d384dd12f6db43907f7802df902719 Jan 31 16:41:46 crc kubenswrapper[4769]: I0131 16:41:46.863196 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-index-594rf" event={"ID":"d0deba60-bef5-4108-ab00-ab378d8c9a3b","Type":"ContainerStarted","Data":"dd6161631c79870edf3f1237ed6d3c6e72d384dd12f6db43907f7802df902719"} Jan 31 16:41:46 crc kubenswrapper[4769]: I0131 16:41:46.863306 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/mariadb-operator-index-d82gv" podUID="ae706cd3-99c0-43e0-b026-3c5345caf0bb" containerName="registry-server" containerID="cri-o://ac2cd6c3e28e032d299d52d731a9eb1a03827b47dc012ecb8514e063138308d5" gracePeriod=2 Jan 31 16:41:47 crc kubenswrapper[4769]: I0131 16:41:47.275784 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-index-d82gv" Jan 31 16:41:47 crc kubenswrapper[4769]: I0131 16:41:47.457810 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gwhmj\" (UniqueName: \"kubernetes.io/projected/ae706cd3-99c0-43e0-b026-3c5345caf0bb-kube-api-access-gwhmj\") pod \"ae706cd3-99c0-43e0-b026-3c5345caf0bb\" (UID: \"ae706cd3-99c0-43e0-b026-3c5345caf0bb\") " Jan 31 16:41:47 crc kubenswrapper[4769]: I0131 16:41:47.466229 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ae706cd3-99c0-43e0-b026-3c5345caf0bb-kube-api-access-gwhmj" (OuterVolumeSpecName: "kube-api-access-gwhmj") pod "ae706cd3-99c0-43e0-b026-3c5345caf0bb" (UID: "ae706cd3-99c0-43e0-b026-3c5345caf0bb"). InnerVolumeSpecName "kube-api-access-gwhmj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:41:47 crc kubenswrapper[4769]: I0131 16:41:47.559547 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gwhmj\" (UniqueName: \"kubernetes.io/projected/ae706cd3-99c0-43e0-b026-3c5345caf0bb-kube-api-access-gwhmj\") on node \"crc\" DevicePath \"\"" Jan 31 16:41:47 crc kubenswrapper[4769]: I0131 16:41:47.879679 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-index-594rf" event={"ID":"d0deba60-bef5-4108-ab00-ab378d8c9a3b","Type":"ContainerStarted","Data":"7f9366932fba322c566c348f64a098ca022226905f848dccd0afc65a02eb5e4b"} Jan 31 16:41:47 crc kubenswrapper[4769]: I0131 16:41:47.882688 4769 generic.go:334] "Generic (PLEG): container finished" podID="ae706cd3-99c0-43e0-b026-3c5345caf0bb" containerID="ac2cd6c3e28e032d299d52d731a9eb1a03827b47dc012ecb8514e063138308d5" exitCode=0 Jan 31 16:41:47 crc kubenswrapper[4769]: I0131 16:41:47.882742 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-index-d82gv" event={"ID":"ae706cd3-99c0-43e0-b026-3c5345caf0bb","Type":"ContainerDied","Data":"ac2cd6c3e28e032d299d52d731a9eb1a03827b47dc012ecb8514e063138308d5"} Jan 31 16:41:47 crc kubenswrapper[4769]: I0131 16:41:47.882772 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-index-d82gv" event={"ID":"ae706cd3-99c0-43e0-b026-3c5345caf0bb","Type":"ContainerDied","Data":"33d6e0cfe65458dc3d9c095b10a6efaacac28831944d68862922a6af9d3c70ea"} Jan 31 16:41:47 crc kubenswrapper[4769]: I0131 16:41:47.882803 4769 scope.go:117] "RemoveContainer" containerID="ac2cd6c3e28e032d299d52d731a9eb1a03827b47dc012ecb8514e063138308d5" Jan 31 16:41:47 crc kubenswrapper[4769]: I0131 16:41:47.882964 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-index-d82gv" Jan 31 16:41:47 crc kubenswrapper[4769]: I0131 16:41:47.912800 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-index-594rf" podStartSLOduration=2.48600177 podStartE2EDuration="2.912774488s" podCreationTimestamp="2026-01-31 16:41:45 +0000 UTC" firstStartedPulling="2026-01-31 16:41:46.608889141 +0000 UTC m=+754.683057840" lastFinishedPulling="2026-01-31 16:41:47.035661849 +0000 UTC m=+755.109830558" observedRunningTime="2026-01-31 16:41:47.905351479 +0000 UTC m=+755.979520188" watchObservedRunningTime="2026-01-31 16:41:47.912774488 +0000 UTC m=+755.986943197" Jan 31 16:41:47 crc kubenswrapper[4769]: I0131 16:41:47.915912 4769 scope.go:117] "RemoveContainer" containerID="ac2cd6c3e28e032d299d52d731a9eb1a03827b47dc012ecb8514e063138308d5" Jan 31 16:41:47 crc kubenswrapper[4769]: E0131 16:41:47.916743 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ac2cd6c3e28e032d299d52d731a9eb1a03827b47dc012ecb8514e063138308d5\": container with ID starting with ac2cd6c3e28e032d299d52d731a9eb1a03827b47dc012ecb8514e063138308d5 not found: ID does not exist" containerID="ac2cd6c3e28e032d299d52d731a9eb1a03827b47dc012ecb8514e063138308d5" Jan 31 16:41:47 crc kubenswrapper[4769]: I0131 16:41:47.916801 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac2cd6c3e28e032d299d52d731a9eb1a03827b47dc012ecb8514e063138308d5"} err="failed to get container status \"ac2cd6c3e28e032d299d52d731a9eb1a03827b47dc012ecb8514e063138308d5\": rpc error: code = NotFound desc = could not find container \"ac2cd6c3e28e032d299d52d731a9eb1a03827b47dc012ecb8514e063138308d5\": container with ID starting with ac2cd6c3e28e032d299d52d731a9eb1a03827b47dc012ecb8514e063138308d5 not found: ID does not exist" Jan 31 16:41:47 crc kubenswrapper[4769]: I0131 16:41:47.941351 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/mariadb-operator-index-d82gv"] Jan 31 16:41:47 crc kubenswrapper[4769]: I0131 16:41:47.951087 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/mariadb-operator-index-d82gv"] Jan 31 16:41:48 crc kubenswrapper[4769]: I0131 16:41:48.716435 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ae706cd3-99c0-43e0-b026-3c5345caf0bb" path="/var/lib/kubelet/pods/ae706cd3-99c0-43e0-b026-3c5345caf0bb/volumes" Jan 31 16:41:50 crc kubenswrapper[4769]: I0131 16:41:50.682569 4769 patch_prober.go:28] interesting pod/machine-config-daemon-4bqbm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 31 16:41:50 crc kubenswrapper[4769]: I0131 16:41:50.682669 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 31 16:41:56 crc kubenswrapper[4769]: I0131 16:41:56.108112 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-index-594rf" Jan 31 16:41:56 crc kubenswrapper[4769]: I0131 16:41:56.108769 4769 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/mariadb-operator-index-594rf" Jan 31 16:41:56 crc kubenswrapper[4769]: I0131 16:41:56.152601 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/mariadb-operator-index-594rf" Jan 31 16:41:56 crc kubenswrapper[4769]: I0131 16:41:56.990357 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-index-594rf" Jan 31 16:42:03 crc kubenswrapper[4769]: I0131 16:42:03.205836 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40pcmg8"] Jan 31 16:42:03 crc kubenswrapper[4769]: E0131 16:42:03.206680 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae706cd3-99c0-43e0-b026-3c5345caf0bb" containerName="registry-server" Jan 31 16:42:03 crc kubenswrapper[4769]: I0131 16:42:03.206714 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae706cd3-99c0-43e0-b026-3c5345caf0bb" containerName="registry-server" Jan 31 16:42:03 crc kubenswrapper[4769]: I0131 16:42:03.207018 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae706cd3-99c0-43e0-b026-3c5345caf0bb" containerName="registry-server" Jan 31 16:42:03 crc kubenswrapper[4769]: I0131 16:42:03.208469 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40pcmg8" Jan 31 16:42:03 crc kubenswrapper[4769]: I0131 16:42:03.211934 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-x6tzc" Jan 31 16:42:03 crc kubenswrapper[4769]: I0131 16:42:03.225198 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40pcmg8"] Jan 31 16:42:03 crc kubenswrapper[4769]: I0131 16:42:03.386763 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0634e0a0-8b72-4218-9076-dc8cfdc6c3e5-bundle\") pod \"f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40pcmg8\" (UID: \"0634e0a0-8b72-4218-9076-dc8cfdc6c3e5\") " pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40pcmg8" Jan 31 16:42:03 crc kubenswrapper[4769]: I0131 16:42:03.386874 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4x5jl\" (UniqueName: \"kubernetes.io/projected/0634e0a0-8b72-4218-9076-dc8cfdc6c3e5-kube-api-access-4x5jl\") pod \"f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40pcmg8\" (UID: \"0634e0a0-8b72-4218-9076-dc8cfdc6c3e5\") " pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40pcmg8" Jan 31 16:42:03 crc kubenswrapper[4769]: I0131 16:42:03.386987 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0634e0a0-8b72-4218-9076-dc8cfdc6c3e5-util\") pod \"f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40pcmg8\" (UID: \"0634e0a0-8b72-4218-9076-dc8cfdc6c3e5\") " pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40pcmg8" Jan 31 16:42:03 crc kubenswrapper[4769]: I0131 16:42:03.488433 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4x5jl\" 
(UniqueName: \"kubernetes.io/projected/0634e0a0-8b72-4218-9076-dc8cfdc6c3e5-kube-api-access-4x5jl\") pod \"f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40pcmg8\" (UID: \"0634e0a0-8b72-4218-9076-dc8cfdc6c3e5\") " pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40pcmg8" Jan 31 16:42:03 crc kubenswrapper[4769]: I0131 16:42:03.488636 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0634e0a0-8b72-4218-9076-dc8cfdc6c3e5-util\") pod \"f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40pcmg8\" (UID: \"0634e0a0-8b72-4218-9076-dc8cfdc6c3e5\") " pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40pcmg8" Jan 31 16:42:03 crc kubenswrapper[4769]: I0131 16:42:03.488793 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0634e0a0-8b72-4218-9076-dc8cfdc6c3e5-bundle\") pod \"f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40pcmg8\" (UID: \"0634e0a0-8b72-4218-9076-dc8cfdc6c3e5\") " pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40pcmg8" Jan 31 16:42:03 crc kubenswrapper[4769]: I0131 16:42:03.489394 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0634e0a0-8b72-4218-9076-dc8cfdc6c3e5-util\") pod \"f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40pcmg8\" (UID: \"0634e0a0-8b72-4218-9076-dc8cfdc6c3e5\") " pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40pcmg8" Jan 31 16:42:03 crc kubenswrapper[4769]: I0131 16:42:03.489598 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0634e0a0-8b72-4218-9076-dc8cfdc6c3e5-bundle\") pod \"f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40pcmg8\" (UID: \"0634e0a0-8b72-4218-9076-dc8cfdc6c3e5\") " pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40pcmg8" Jan 31 16:42:03 crc kubenswrapper[4769]: I0131 16:42:03.530939 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4x5jl\" (UniqueName: \"kubernetes.io/projected/0634e0a0-8b72-4218-9076-dc8cfdc6c3e5-kube-api-access-4x5jl\") pod \"f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40pcmg8\" (UID: \"0634e0a0-8b72-4218-9076-dc8cfdc6c3e5\") " pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40pcmg8" Jan 31 16:42:03 crc kubenswrapper[4769]: I0131 16:42:03.533744 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40pcmg8" Jan 31 16:42:03 crc kubenswrapper[4769]: I0131 16:42:03.803172 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40pcmg8"] Jan 31 16:42:04 crc kubenswrapper[4769]: I0131 16:42:04.004202 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40pcmg8" event={"ID":"0634e0a0-8b72-4218-9076-dc8cfdc6c3e5","Type":"ContainerStarted","Data":"0f829c368e61e49e4dcf3829eafa843b6d326545f32c33f4fe36753b1b6f9230"} Jan 31 16:42:04 crc kubenswrapper[4769]: I0131 16:42:04.004714 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40pcmg8" event={"ID":"0634e0a0-8b72-4218-9076-dc8cfdc6c3e5","Type":"ContainerStarted","Data":"feb3d5f7fe518bd5abed655faac02bc9c2c6d13f0abf3e7600026fbe2bec13d7"} Jan 31 16:42:05 crc kubenswrapper[4769]: I0131 16:42:05.011458 4769 generic.go:334] "Generic (PLEG): container finished" podID="0634e0a0-8b72-4218-9076-dc8cfdc6c3e5" containerID="0f829c368e61e49e4dcf3829eafa843b6d326545f32c33f4fe36753b1b6f9230" exitCode=0 Jan 31 16:42:05 crc kubenswrapper[4769]: I0131 16:42:05.011543 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40pcmg8" event={"ID":"0634e0a0-8b72-4218-9076-dc8cfdc6c3e5","Type":"ContainerDied","Data":"0f829c368e61e49e4dcf3829eafa843b6d326545f32c33f4fe36753b1b6f9230"} Jan 31 16:42:06 crc kubenswrapper[4769]: I0131 16:42:06.021567 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40pcmg8" event={"ID":"0634e0a0-8b72-4218-9076-dc8cfdc6c3e5","Type":"ContainerStarted","Data":"959b80a50c976b10a2a6ad695128cb4f225e1f77ab5704772111b0b4c8540969"} Jan 31 16:42:07 crc kubenswrapper[4769]: I0131 16:42:07.036463 4769 generic.go:334] "Generic (PLEG): container finished" podID="0634e0a0-8b72-4218-9076-dc8cfdc6c3e5" containerID="959b80a50c976b10a2a6ad695128cb4f225e1f77ab5704772111b0b4c8540969" exitCode=0 Jan 31 16:42:07 crc kubenswrapper[4769]: I0131 16:42:07.036564 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40pcmg8" event={"ID":"0634e0a0-8b72-4218-9076-dc8cfdc6c3e5","Type":"ContainerDied","Data":"959b80a50c976b10a2a6ad695128cb4f225e1f77ab5704772111b0b4c8540969"} Jan 31 16:42:08 crc kubenswrapper[4769]: I0131 16:42:08.049057 4769 generic.go:334] "Generic (PLEG): container finished" podID="0634e0a0-8b72-4218-9076-dc8cfdc6c3e5" containerID="a68e72dd771505c170798baa2840181ea0235be8afc230a333f7953e8c7e205c" exitCode=0 Jan 31 16:42:08 crc kubenswrapper[4769]: I0131 16:42:08.049107 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40pcmg8" event={"ID":"0634e0a0-8b72-4218-9076-dc8cfdc6c3e5","Type":"ContainerDied","Data":"a68e72dd771505c170798baa2840181ea0235be8afc230a333f7953e8c7e205c"} Jan 31 16:42:09 crc kubenswrapper[4769]: I0131 16:42:09.299068 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40pcmg8" Jan 31 16:42:09 crc kubenswrapper[4769]: I0131 16:42:09.477889 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0634e0a0-8b72-4218-9076-dc8cfdc6c3e5-bundle\") pod \"0634e0a0-8b72-4218-9076-dc8cfdc6c3e5\" (UID: \"0634e0a0-8b72-4218-9076-dc8cfdc6c3e5\") " Jan 31 16:42:09 crc kubenswrapper[4769]: I0131 16:42:09.477961 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0634e0a0-8b72-4218-9076-dc8cfdc6c3e5-util\") pod \"0634e0a0-8b72-4218-9076-dc8cfdc6c3e5\" (UID: \"0634e0a0-8b72-4218-9076-dc8cfdc6c3e5\") " Jan 31 16:42:09 crc kubenswrapper[4769]: I0131 16:42:09.478147 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4x5jl\" (UniqueName: \"kubernetes.io/projected/0634e0a0-8b72-4218-9076-dc8cfdc6c3e5-kube-api-access-4x5jl\") pod \"0634e0a0-8b72-4218-9076-dc8cfdc6c3e5\" (UID: \"0634e0a0-8b72-4218-9076-dc8cfdc6c3e5\") " Jan 31 16:42:09 crc kubenswrapper[4769]: I0131 16:42:09.480225 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0634e0a0-8b72-4218-9076-dc8cfdc6c3e5-bundle" (OuterVolumeSpecName: "bundle") pod "0634e0a0-8b72-4218-9076-dc8cfdc6c3e5" (UID: "0634e0a0-8b72-4218-9076-dc8cfdc6c3e5"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:42:09 crc kubenswrapper[4769]: I0131 16:42:09.488506 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0634e0a0-8b72-4218-9076-dc8cfdc6c3e5-kube-api-access-4x5jl" (OuterVolumeSpecName: "kube-api-access-4x5jl") pod "0634e0a0-8b72-4218-9076-dc8cfdc6c3e5" (UID: "0634e0a0-8b72-4218-9076-dc8cfdc6c3e5"). InnerVolumeSpecName "kube-api-access-4x5jl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:42:09 crc kubenswrapper[4769]: I0131 16:42:09.492489 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0634e0a0-8b72-4218-9076-dc8cfdc6c3e5-util" (OuterVolumeSpecName: "util") pod "0634e0a0-8b72-4218-9076-dc8cfdc6c3e5" (UID: "0634e0a0-8b72-4218-9076-dc8cfdc6c3e5"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:42:09 crc kubenswrapper[4769]: I0131 16:42:09.579677 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4x5jl\" (UniqueName: \"kubernetes.io/projected/0634e0a0-8b72-4218-9076-dc8cfdc6c3e5-kube-api-access-4x5jl\") on node \"crc\" DevicePath \"\"" Jan 31 16:42:09 crc kubenswrapper[4769]: I0131 16:42:09.579730 4769 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0634e0a0-8b72-4218-9076-dc8cfdc6c3e5-bundle\") on node \"crc\" DevicePath \"\"" Jan 31 16:42:09 crc kubenswrapper[4769]: I0131 16:42:09.579751 4769 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0634e0a0-8b72-4218-9076-dc8cfdc6c3e5-util\") on node \"crc\" DevicePath \"\"" Jan 31 16:42:10 crc kubenswrapper[4769]: I0131 16:42:10.065336 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40pcmg8" event={"ID":"0634e0a0-8b72-4218-9076-dc8cfdc6c3e5","Type":"ContainerDied","Data":"feb3d5f7fe518bd5abed655faac02bc9c2c6d13f0abf3e7600026fbe2bec13d7"} Jan 31 16:42:10 crc kubenswrapper[4769]: I0131 16:42:10.065389 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="feb3d5f7fe518bd5abed655faac02bc9c2c6d13f0abf3e7600026fbe2bec13d7" Jan 31 16:42:10 crc kubenswrapper[4769]: I0131 16:42:10.065432 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40pcmg8" Jan 31 16:42:16 crc kubenswrapper[4769]: I0131 16:42:16.304495 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-cb6c676df-j2w59"] Jan 31 16:42:16 crc kubenswrapper[4769]: E0131 16:42:16.305195 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0634e0a0-8b72-4218-9076-dc8cfdc6c3e5" containerName="pull" Jan 31 16:42:16 crc kubenswrapper[4769]: I0131 16:42:16.305209 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="0634e0a0-8b72-4218-9076-dc8cfdc6c3e5" containerName="pull" Jan 31 16:42:16 crc kubenswrapper[4769]: E0131 16:42:16.305223 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0634e0a0-8b72-4218-9076-dc8cfdc6c3e5" containerName="util" Jan 31 16:42:16 crc kubenswrapper[4769]: I0131 16:42:16.305248 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="0634e0a0-8b72-4218-9076-dc8cfdc6c3e5" containerName="util" Jan 31 16:42:16 crc kubenswrapper[4769]: E0131 16:42:16.305262 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0634e0a0-8b72-4218-9076-dc8cfdc6c3e5" containerName="extract" Jan 31 16:42:16 crc kubenswrapper[4769]: I0131 16:42:16.305271 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="0634e0a0-8b72-4218-9076-dc8cfdc6c3e5" containerName="extract" Jan 31 16:42:16 crc kubenswrapper[4769]: I0131 16:42:16.305412 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="0634e0a0-8b72-4218-9076-dc8cfdc6c3e5" containerName="extract" Jan 31 16:42:16 crc kubenswrapper[4769]: I0131 16:42:16.305834 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-cb6c676df-j2w59" Jan 31 16:42:16 crc kubenswrapper[4769]: I0131 16:42:16.308372 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Jan 31 16:42:16 crc kubenswrapper[4769]: I0131 16:42:16.309069 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-service-cert" Jan 31 16:42:16 crc kubenswrapper[4769]: I0131 16:42:16.309224 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-xrznf" Jan 31 16:42:16 crc kubenswrapper[4769]: I0131 16:42:16.320162 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-cb6c676df-j2w59"] Jan 31 16:42:16 crc kubenswrapper[4769]: I0131 16:42:16.390734 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/374acfa1-5eda-48fd-964f-7fc81aaab552-webhook-cert\") pod \"mariadb-operator-controller-manager-cb6c676df-j2w59\" (UID: \"374acfa1-5eda-48fd-964f-7fc81aaab552\") " pod="openstack-operators/mariadb-operator-controller-manager-cb6c676df-j2w59" Jan 31 16:42:16 crc kubenswrapper[4769]: I0131 16:42:16.390806 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-69n4d\" (UniqueName: \"kubernetes.io/projected/374acfa1-5eda-48fd-964f-7fc81aaab552-kube-api-access-69n4d\") pod \"mariadb-operator-controller-manager-cb6c676df-j2w59\" (UID: \"374acfa1-5eda-48fd-964f-7fc81aaab552\") " pod="openstack-operators/mariadb-operator-controller-manager-cb6c676df-j2w59" Jan 31 16:42:16 crc kubenswrapper[4769]: I0131 16:42:16.391081 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/374acfa1-5eda-48fd-964f-7fc81aaab552-apiservice-cert\") pod \"mariadb-operator-controller-manager-cb6c676df-j2w59\" (UID: \"374acfa1-5eda-48fd-964f-7fc81aaab552\") " pod="openstack-operators/mariadb-operator-controller-manager-cb6c676df-j2w59" Jan 31 16:42:16 crc kubenswrapper[4769]: I0131 16:42:16.492653 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/374acfa1-5eda-48fd-964f-7fc81aaab552-apiservice-cert\") pod \"mariadb-operator-controller-manager-cb6c676df-j2w59\" (UID: \"374acfa1-5eda-48fd-964f-7fc81aaab552\") " pod="openstack-operators/mariadb-operator-controller-manager-cb6c676df-j2w59" Jan 31 16:42:16 crc kubenswrapper[4769]: I0131 16:42:16.492787 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/374acfa1-5eda-48fd-964f-7fc81aaab552-webhook-cert\") pod \"mariadb-operator-controller-manager-cb6c676df-j2w59\" (UID: \"374acfa1-5eda-48fd-964f-7fc81aaab552\") " pod="openstack-operators/mariadb-operator-controller-manager-cb6c676df-j2w59" Jan 31 16:42:16 crc kubenswrapper[4769]: I0131 16:42:16.492876 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-69n4d\" (UniqueName: \"kubernetes.io/projected/374acfa1-5eda-48fd-964f-7fc81aaab552-kube-api-access-69n4d\") pod \"mariadb-operator-controller-manager-cb6c676df-j2w59\" (UID: \"374acfa1-5eda-48fd-964f-7fc81aaab552\") " 
pod="openstack-operators/mariadb-operator-controller-manager-cb6c676df-j2w59" Jan 31 16:42:16 crc kubenswrapper[4769]: I0131 16:42:16.498384 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/374acfa1-5eda-48fd-964f-7fc81aaab552-apiservice-cert\") pod \"mariadb-operator-controller-manager-cb6c676df-j2w59\" (UID: \"374acfa1-5eda-48fd-964f-7fc81aaab552\") " pod="openstack-operators/mariadb-operator-controller-manager-cb6c676df-j2w59" Jan 31 16:42:16 crc kubenswrapper[4769]: I0131 16:42:16.498386 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/374acfa1-5eda-48fd-964f-7fc81aaab552-webhook-cert\") pod \"mariadb-operator-controller-manager-cb6c676df-j2w59\" (UID: \"374acfa1-5eda-48fd-964f-7fc81aaab552\") " pod="openstack-operators/mariadb-operator-controller-manager-cb6c676df-j2w59" Jan 31 16:42:16 crc kubenswrapper[4769]: I0131 16:42:16.511265 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-69n4d\" (UniqueName: \"kubernetes.io/projected/374acfa1-5eda-48fd-964f-7fc81aaab552-kube-api-access-69n4d\") pod \"mariadb-operator-controller-manager-cb6c676df-j2w59\" (UID: \"374acfa1-5eda-48fd-964f-7fc81aaab552\") " pod="openstack-operators/mariadb-operator-controller-manager-cb6c676df-j2w59" Jan 31 16:42:16 crc kubenswrapper[4769]: I0131 16:42:16.623083 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-cb6c676df-j2w59" Jan 31 16:42:17 crc kubenswrapper[4769]: I0131 16:42:17.115856 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-cb6c676df-j2w59"] Jan 31 16:42:18 crc kubenswrapper[4769]: I0131 16:42:18.116885 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-cb6c676df-j2w59" event={"ID":"374acfa1-5eda-48fd-964f-7fc81aaab552","Type":"ContainerStarted","Data":"da2d3eb9110447978c70d81ecc887ad9eafad1797d0c773eaa2d3f8ce919edcf"} Jan 31 16:42:20 crc kubenswrapper[4769]: I0131 16:42:20.681900 4769 patch_prober.go:28] interesting pod/machine-config-daemon-4bqbm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 31 16:42:20 crc kubenswrapper[4769]: I0131 16:42:20.682184 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 31 16:42:20 crc kubenswrapper[4769]: I0131 16:42:20.682229 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" Jan 31 16:42:20 crc kubenswrapper[4769]: I0131 16:42:20.682765 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"468aac8c3c2e831dfe213619a8cbfe7284a5104d05804071dc210d52a0e5d3d0"} pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 31 
16:42:20 crc kubenswrapper[4769]: I0131 16:42:20.682815 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" containerID="cri-o://468aac8c3c2e831dfe213619a8cbfe7284a5104d05804071dc210d52a0e5d3d0" gracePeriod=600 Jan 31 16:42:21 crc kubenswrapper[4769]: I0131 16:42:21.137853 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-cb6c676df-j2w59" event={"ID":"374acfa1-5eda-48fd-964f-7fc81aaab552","Type":"ContainerStarted","Data":"9c7b9565df4ef418097db898e91a8c5679adeb3b2ba3178cbd627104b4a446f9"} Jan 31 16:42:21 crc kubenswrapper[4769]: I0131 16:42:21.138193 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-cb6c676df-j2w59" Jan 31 16:42:21 crc kubenswrapper[4769]: I0131 16:42:21.142040 4769 generic.go:334] "Generic (PLEG): container finished" podID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerID="468aac8c3c2e831dfe213619a8cbfe7284a5104d05804071dc210d52a0e5d3d0" exitCode=0 Jan 31 16:42:21 crc kubenswrapper[4769]: I0131 16:42:21.142095 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" event={"ID":"1d352f75-43f7-4b8c-867e-cfb17bbbe011","Type":"ContainerDied","Data":"468aac8c3c2e831dfe213619a8cbfe7284a5104d05804071dc210d52a0e5d3d0"} Jan 31 16:42:21 crc kubenswrapper[4769]: I0131 16:42:21.142133 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" event={"ID":"1d352f75-43f7-4b8c-867e-cfb17bbbe011","Type":"ContainerStarted","Data":"54d76b4d5009a4a563cf1e37ee7df5b71a49cb0937af68d18db56c67eb23639a"} Jan 31 16:42:21 crc kubenswrapper[4769]: I0131 16:42:21.142160 4769 scope.go:117] "RemoveContainer" containerID="d3a0f00c1b4d89d80065217a88665f99018d91df8e0042a3f3c33726e97b1315" Jan 31 16:42:21 crc kubenswrapper[4769]: I0131 16:42:21.166572 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-cb6c676df-j2w59" podStartSLOduration=2.046865021 podStartE2EDuration="5.166539633s" podCreationTimestamp="2026-01-31 16:42:16 +0000 UTC" firstStartedPulling="2026-01-31 16:42:17.126483725 +0000 UTC m=+785.200652394" lastFinishedPulling="2026-01-31 16:42:20.246158337 +0000 UTC m=+788.320327006" observedRunningTime="2026-01-31 16:42:21.158639022 +0000 UTC m=+789.232807691" watchObservedRunningTime="2026-01-31 16:42:21.166539633 +0000 UTC m=+789.240708332" Jan 31 16:42:26 crc kubenswrapper[4769]: I0131 16:42:26.628960 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-cb6c676df-j2w59" Jan 31 16:42:29 crc kubenswrapper[4769]: I0131 16:42:29.916894 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-index-x7p2x"] Jan 31 16:42:29 crc kubenswrapper[4769]: I0131 16:42:29.918063 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-index-x7p2x" Jan 31 16:42:29 crc kubenswrapper[4769]: I0131 16:42:29.919829 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-index-dockercfg-59n7h" Jan 31 16:42:29 crc kubenswrapper[4769]: I0131 16:42:29.931726 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-index-x7p2x"] Jan 31 16:42:29 crc kubenswrapper[4769]: I0131 16:42:29.970075 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-flcgq\" (UniqueName: \"kubernetes.io/projected/69da7924-4846-40cc-b6c7-f70e77c20147-kube-api-access-flcgq\") pod \"infra-operator-index-x7p2x\" (UID: \"69da7924-4846-40cc-b6c7-f70e77c20147\") " pod="openstack-operators/infra-operator-index-x7p2x" Jan 31 16:42:30 crc kubenswrapper[4769]: I0131 16:42:30.071103 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-flcgq\" (UniqueName: \"kubernetes.io/projected/69da7924-4846-40cc-b6c7-f70e77c20147-kube-api-access-flcgq\") pod \"infra-operator-index-x7p2x\" (UID: \"69da7924-4846-40cc-b6c7-f70e77c20147\") " pod="openstack-operators/infra-operator-index-x7p2x" Jan 31 16:42:30 crc kubenswrapper[4769]: I0131 16:42:30.091187 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-flcgq\" (UniqueName: \"kubernetes.io/projected/69da7924-4846-40cc-b6c7-f70e77c20147-kube-api-access-flcgq\") pod \"infra-operator-index-x7p2x\" (UID: \"69da7924-4846-40cc-b6c7-f70e77c20147\") " pod="openstack-operators/infra-operator-index-x7p2x" Jan 31 16:42:30 crc kubenswrapper[4769]: I0131 16:42:30.274894 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-index-x7p2x" Jan 31 16:42:30 crc kubenswrapper[4769]: I0131 16:42:30.487237 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-index-x7p2x"] Jan 31 16:42:31 crc kubenswrapper[4769]: I0131 16:42:31.203048 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-index-x7p2x" event={"ID":"69da7924-4846-40cc-b6c7-f70e77c20147","Type":"ContainerStarted","Data":"035bf955bebceced55293eb8082528ac7f232c1d3a2d638f570fcdf15c11b52d"} Jan 31 16:42:32 crc kubenswrapper[4769]: I0131 16:42:32.212335 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-index-x7p2x" event={"ID":"69da7924-4846-40cc-b6c7-f70e77c20147","Type":"ContainerStarted","Data":"1823973a4ba613db083f3ee76b900d3ef14cfed061d9552ab626ae3051db724b"} Jan 31 16:42:32 crc kubenswrapper[4769]: I0131 16:42:32.238059 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-index-x7p2x" podStartSLOduration=2.372462333 podStartE2EDuration="3.238032818s" podCreationTimestamp="2026-01-31 16:42:29 +0000 UTC" firstStartedPulling="2026-01-31 16:42:30.497020297 +0000 UTC m=+798.571188966" lastFinishedPulling="2026-01-31 16:42:31.362590782 +0000 UTC m=+799.436759451" observedRunningTime="2026-01-31 16:42:32.233920193 +0000 UTC m=+800.308088892" watchObservedRunningTime="2026-01-31 16:42:32.238032818 +0000 UTC m=+800.312201517" Jan 31 16:42:34 crc kubenswrapper[4769]: I0131 16:42:34.109741 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/infra-operator-index-x7p2x"] Jan 31 16:42:34 crc kubenswrapper[4769]: I0131 16:42:34.225691 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/infra-operator-index-x7p2x" podUID="69da7924-4846-40cc-b6c7-f70e77c20147" containerName="registry-server" containerID="cri-o://1823973a4ba613db083f3ee76b900d3ef14cfed061d9552ab626ae3051db724b" gracePeriod=2 Jan 31 16:42:34 crc kubenswrapper[4769]: I0131 16:42:34.638648 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-index-x7p2x" Jan 31 16:42:34 crc kubenswrapper[4769]: I0131 16:42:34.740761 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-index-t2kpn"] Jan 31 16:42:34 crc kubenswrapper[4769]: E0131 16:42:34.742294 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69da7924-4846-40cc-b6c7-f70e77c20147" containerName="registry-server" Jan 31 16:42:34 crc kubenswrapper[4769]: I0131 16:42:34.742324 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="69da7924-4846-40cc-b6c7-f70e77c20147" containerName="registry-server" Jan 31 16:42:34 crc kubenswrapper[4769]: I0131 16:42:34.742679 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="69da7924-4846-40cc-b6c7-f70e77c20147" containerName="registry-server" Jan 31 16:42:34 crc kubenswrapper[4769]: I0131 16:42:34.744402 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-index-t2kpn" Jan 31 16:42:34 crc kubenswrapper[4769]: I0131 16:42:34.749401 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-index-t2kpn"] Jan 31 16:42:34 crc kubenswrapper[4769]: I0131 16:42:34.843413 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-flcgq\" (UniqueName: \"kubernetes.io/projected/69da7924-4846-40cc-b6c7-f70e77c20147-kube-api-access-flcgq\") pod \"69da7924-4846-40cc-b6c7-f70e77c20147\" (UID: \"69da7924-4846-40cc-b6c7-f70e77c20147\") " Jan 31 16:42:34 crc kubenswrapper[4769]: I0131 16:42:34.843846 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fqwdm\" (UniqueName: \"kubernetes.io/projected/c9ad763b-ad6a-4356-8824-0551ae2544f4-kube-api-access-fqwdm\") pod \"infra-operator-index-t2kpn\" (UID: \"c9ad763b-ad6a-4356-8824-0551ae2544f4\") " pod="openstack-operators/infra-operator-index-t2kpn" Jan 31 16:42:34 crc kubenswrapper[4769]: I0131 16:42:34.848636 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/69da7924-4846-40cc-b6c7-f70e77c20147-kube-api-access-flcgq" (OuterVolumeSpecName: "kube-api-access-flcgq") pod "69da7924-4846-40cc-b6c7-f70e77c20147" (UID: "69da7924-4846-40cc-b6c7-f70e77c20147"). InnerVolumeSpecName "kube-api-access-flcgq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:42:34 crc kubenswrapper[4769]: I0131 16:42:34.944878 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fqwdm\" (UniqueName: \"kubernetes.io/projected/c9ad763b-ad6a-4356-8824-0551ae2544f4-kube-api-access-fqwdm\") pod \"infra-operator-index-t2kpn\" (UID: \"c9ad763b-ad6a-4356-8824-0551ae2544f4\") " pod="openstack-operators/infra-operator-index-t2kpn" Jan 31 16:42:34 crc kubenswrapper[4769]: I0131 16:42:34.944971 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-flcgq\" (UniqueName: \"kubernetes.io/projected/69da7924-4846-40cc-b6c7-f70e77c20147-kube-api-access-flcgq\") on node \"crc\" DevicePath \"\"" Jan 31 16:42:34 crc kubenswrapper[4769]: I0131 16:42:34.967654 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fqwdm\" (UniqueName: \"kubernetes.io/projected/c9ad763b-ad6a-4356-8824-0551ae2544f4-kube-api-access-fqwdm\") pod \"infra-operator-index-t2kpn\" (UID: \"c9ad763b-ad6a-4356-8824-0551ae2544f4\") " pod="openstack-operators/infra-operator-index-t2kpn" Jan 31 16:42:35 crc kubenswrapper[4769]: I0131 16:42:35.056520 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-index-t2kpn" Jan 31 16:42:35 crc kubenswrapper[4769]: I0131 16:42:35.233212 4769 generic.go:334] "Generic (PLEG): container finished" podID="69da7924-4846-40cc-b6c7-f70e77c20147" containerID="1823973a4ba613db083f3ee76b900d3ef14cfed061d9552ab626ae3051db724b" exitCode=0 Jan 31 16:42:35 crc kubenswrapper[4769]: I0131 16:42:35.233388 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-index-x7p2x" Jan 31 16:42:35 crc kubenswrapper[4769]: I0131 16:42:35.233428 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-index-x7p2x" event={"ID":"69da7924-4846-40cc-b6c7-f70e77c20147","Type":"ContainerDied","Data":"1823973a4ba613db083f3ee76b900d3ef14cfed061d9552ab626ae3051db724b"} Jan 31 16:42:35 crc kubenswrapper[4769]: I0131 16:42:35.233972 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-index-x7p2x" event={"ID":"69da7924-4846-40cc-b6c7-f70e77c20147","Type":"ContainerDied","Data":"035bf955bebceced55293eb8082528ac7f232c1d3a2d638f570fcdf15c11b52d"} Jan 31 16:42:35 crc kubenswrapper[4769]: I0131 16:42:35.233998 4769 scope.go:117] "RemoveContainer" containerID="1823973a4ba613db083f3ee76b900d3ef14cfed061d9552ab626ae3051db724b" Jan 31 16:42:35 crc kubenswrapper[4769]: I0131 16:42:35.264425 4769 scope.go:117] "RemoveContainer" containerID="1823973a4ba613db083f3ee76b900d3ef14cfed061d9552ab626ae3051db724b" Jan 31 16:42:35 crc kubenswrapper[4769]: E0131 16:42:35.265825 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1823973a4ba613db083f3ee76b900d3ef14cfed061d9552ab626ae3051db724b\": container with ID starting with 1823973a4ba613db083f3ee76b900d3ef14cfed061d9552ab626ae3051db724b not found: ID does not exist" containerID="1823973a4ba613db083f3ee76b900d3ef14cfed061d9552ab626ae3051db724b" Jan 31 16:42:35 crc kubenswrapper[4769]: I0131 16:42:35.265880 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1823973a4ba613db083f3ee76b900d3ef14cfed061d9552ab626ae3051db724b"} err="failed to get container status \"1823973a4ba613db083f3ee76b900d3ef14cfed061d9552ab626ae3051db724b\": rpc error: code = NotFound desc = could not find container \"1823973a4ba613db083f3ee76b900d3ef14cfed061d9552ab626ae3051db724b\": container with ID starting with 1823973a4ba613db083f3ee76b900d3ef14cfed061d9552ab626ae3051db724b not found: ID does not exist" Jan 31 16:42:35 crc kubenswrapper[4769]: I0131 16:42:35.277801 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/infra-operator-index-x7p2x"] Jan 31 16:42:35 crc kubenswrapper[4769]: I0131 16:42:35.282064 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/infra-operator-index-x7p2x"] Jan 31 16:42:35 crc kubenswrapper[4769]: I0131 16:42:35.285724 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-index-t2kpn"] Jan 31 16:42:35 crc kubenswrapper[4769]: W0131 16:42:35.286756 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc9ad763b_ad6a_4356_8824_0551ae2544f4.slice/crio-331942d5c768f277c337f11105577d163159897ab9791c549842fc9c06db06b1 WatchSource:0}: Error finding container 331942d5c768f277c337f11105577d163159897ab9791c549842fc9c06db06b1: Status 404 returned error can't find the container with id 331942d5c768f277c337f11105577d163159897ab9791c549842fc9c06db06b1 Jan 31 16:42:36 crc kubenswrapper[4769]: I0131 16:42:36.239763 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-index-t2kpn" event={"ID":"c9ad763b-ad6a-4356-8824-0551ae2544f4","Type":"ContainerStarted","Data":"71b9537c7095e41acb32c251772f808afeea667fc9ebaff3a02a06700e417386"} Jan 31 16:42:36 crc kubenswrapper[4769]: 
I0131 16:42:36.240142 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-index-t2kpn" event={"ID":"c9ad763b-ad6a-4356-8824-0551ae2544f4","Type":"ContainerStarted","Data":"331942d5c768f277c337f11105577d163159897ab9791c549842fc9c06db06b1"} Jan 31 16:42:36 crc kubenswrapper[4769]: I0131 16:42:36.269017 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-index-t2kpn" podStartSLOduration=1.808328989 podStartE2EDuration="2.268987914s" podCreationTimestamp="2026-01-31 16:42:34 +0000 UTC" firstStartedPulling="2026-01-31 16:42:35.291800064 +0000 UTC m=+803.365968733" lastFinishedPulling="2026-01-31 16:42:35.752458999 +0000 UTC m=+803.826627658" observedRunningTime="2026-01-31 16:42:36.258547676 +0000 UTC m=+804.332716415" watchObservedRunningTime="2026-01-31 16:42:36.268987914 +0000 UTC m=+804.343156623" Jan 31 16:42:36 crc kubenswrapper[4769]: I0131 16:42:36.719166 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="69da7924-4846-40cc-b6c7-f70e77c20147" path="/var/lib/kubelet/pods/69da7924-4846-40cc-b6c7-f70e77c20147/volumes" Jan 31 16:42:45 crc kubenswrapper[4769]: I0131 16:42:45.057069 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-index-t2kpn" Jan 31 16:42:45 crc kubenswrapper[4769]: I0131 16:42:45.057735 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/infra-operator-index-t2kpn" Jan 31 16:42:45 crc kubenswrapper[4769]: I0131 16:42:45.103163 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/infra-operator-index-t2kpn" Jan 31 16:42:45 crc kubenswrapper[4769]: I0131 16:42:45.133923 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-wff9s"] Jan 31 16:42:45 crc kubenswrapper[4769]: I0131 16:42:45.136292 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-wff9s" Jan 31 16:42:45 crc kubenswrapper[4769]: I0131 16:42:45.141802 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wff9s"] Jan 31 16:42:45 crc kubenswrapper[4769]: I0131 16:42:45.227542 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wsf46\" (UniqueName: \"kubernetes.io/projected/a808184b-833a-4f37-9ed7-a086bb424f41-kube-api-access-wsf46\") pod \"certified-operators-wff9s\" (UID: \"a808184b-833a-4f37-9ed7-a086bb424f41\") " pod="openshift-marketplace/certified-operators-wff9s" Jan 31 16:42:45 crc kubenswrapper[4769]: I0131 16:42:45.227597 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a808184b-833a-4f37-9ed7-a086bb424f41-catalog-content\") pod \"certified-operators-wff9s\" (UID: \"a808184b-833a-4f37-9ed7-a086bb424f41\") " pod="openshift-marketplace/certified-operators-wff9s" Jan 31 16:42:45 crc kubenswrapper[4769]: I0131 16:42:45.227751 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a808184b-833a-4f37-9ed7-a086bb424f41-utilities\") pod \"certified-operators-wff9s\" (UID: \"a808184b-833a-4f37-9ed7-a086bb424f41\") " pod="openshift-marketplace/certified-operators-wff9s" Jan 31 16:42:45 crc kubenswrapper[4769]: I0131 16:42:45.328686 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wsf46\" (UniqueName: \"kubernetes.io/projected/a808184b-833a-4f37-9ed7-a086bb424f41-kube-api-access-wsf46\") pod \"certified-operators-wff9s\" (UID: \"a808184b-833a-4f37-9ed7-a086bb424f41\") " pod="openshift-marketplace/certified-operators-wff9s" Jan 31 16:42:45 crc kubenswrapper[4769]: I0131 16:42:45.328841 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a808184b-833a-4f37-9ed7-a086bb424f41-catalog-content\") pod \"certified-operators-wff9s\" (UID: \"a808184b-833a-4f37-9ed7-a086bb424f41\") " pod="openshift-marketplace/certified-operators-wff9s" Jan 31 16:42:45 crc kubenswrapper[4769]: I0131 16:42:45.328921 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a808184b-833a-4f37-9ed7-a086bb424f41-utilities\") pod \"certified-operators-wff9s\" (UID: \"a808184b-833a-4f37-9ed7-a086bb424f41\") " pod="openshift-marketplace/certified-operators-wff9s" Jan 31 16:42:45 crc kubenswrapper[4769]: I0131 16:42:45.329487 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a808184b-833a-4f37-9ed7-a086bb424f41-catalog-content\") pod \"certified-operators-wff9s\" (UID: \"a808184b-833a-4f37-9ed7-a086bb424f41\") " pod="openshift-marketplace/certified-operators-wff9s" Jan 31 16:42:45 crc kubenswrapper[4769]: I0131 16:42:45.329523 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a808184b-833a-4f37-9ed7-a086bb424f41-utilities\") pod \"certified-operators-wff9s\" (UID: \"a808184b-833a-4f37-9ed7-a086bb424f41\") " pod="openshift-marketplace/certified-operators-wff9s" Jan 31 16:42:45 crc kubenswrapper[4769]: I0131 16:42:45.337657 4769 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-index-t2kpn" Jan 31 16:42:45 crc kubenswrapper[4769]: I0131 16:42:45.363566 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wsf46\" (UniqueName: \"kubernetes.io/projected/a808184b-833a-4f37-9ed7-a086bb424f41-kube-api-access-wsf46\") pod \"certified-operators-wff9s\" (UID: \"a808184b-833a-4f37-9ed7-a086bb424f41\") " pod="openshift-marketplace/certified-operators-wff9s" Jan 31 16:42:45 crc kubenswrapper[4769]: I0131 16:42:45.481311 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wff9s" Jan 31 16:42:45 crc kubenswrapper[4769]: I0131 16:42:45.706336 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wff9s"] Jan 31 16:42:45 crc kubenswrapper[4769]: W0131 16:42:45.733799 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda808184b_833a_4f37_9ed7_a086bb424f41.slice/crio-836abaaea2648a936877bb249d6779b076c32ff20b926300440730018d42a38e WatchSource:0}: Error finding container 836abaaea2648a936877bb249d6779b076c32ff20b926300440730018d42a38e: Status 404 returned error can't find the container with id 836abaaea2648a936877bb249d6779b076c32ff20b926300440730018d42a38e Jan 31 16:42:46 crc kubenswrapper[4769]: I0131 16:42:46.313766 4769 generic.go:334] "Generic (PLEG): container finished" podID="a808184b-833a-4f37-9ed7-a086bb424f41" containerID="8db5364e60665dc122fb49dc62c7417ba653959ffe753dbb62c9576579991d2e" exitCode=0 Jan 31 16:42:46 crc kubenswrapper[4769]: I0131 16:42:46.313858 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wff9s" event={"ID":"a808184b-833a-4f37-9ed7-a086bb424f41","Type":"ContainerDied","Data":"8db5364e60665dc122fb49dc62c7417ba653959ffe753dbb62c9576579991d2e"} Jan 31 16:42:46 crc kubenswrapper[4769]: I0131 16:42:46.314210 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wff9s" event={"ID":"a808184b-833a-4f37-9ed7-a086bb424f41","Type":"ContainerStarted","Data":"836abaaea2648a936877bb249d6779b076c32ff20b926300440730018d42a38e"} Jan 31 16:42:47 crc kubenswrapper[4769]: I0131 16:42:47.324052 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wff9s" event={"ID":"a808184b-833a-4f37-9ed7-a086bb424f41","Type":"ContainerStarted","Data":"c0eb5367903104536b170ba6773763b708ad42ae701a5fd6bb1a6f96428452f4"} Jan 31 16:42:48 crc kubenswrapper[4769]: I0131 16:42:48.333873 4769 generic.go:334] "Generic (PLEG): container finished" podID="a808184b-833a-4f37-9ed7-a086bb424f41" containerID="c0eb5367903104536b170ba6773763b708ad42ae701a5fd6bb1a6f96428452f4" exitCode=0 Jan 31 16:42:48 crc kubenswrapper[4769]: I0131 16:42:48.333942 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wff9s" event={"ID":"a808184b-833a-4f37-9ed7-a086bb424f41","Type":"ContainerDied","Data":"c0eb5367903104536b170ba6773763b708ad42ae701a5fd6bb1a6f96428452f4"} Jan 31 16:42:49 crc kubenswrapper[4769]: I0131 16:42:49.344012 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wff9s" event={"ID":"a808184b-833a-4f37-9ed7-a086bb424f41","Type":"ContainerStarted","Data":"82e9e1b09e8b7f7e64b3393108e6781461b3957473d2a72a9ac5d56ede9acc74"} Jan 31 16:42:49 crc 
kubenswrapper[4769]: I0131 16:42:49.368646 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-wff9s" podStartSLOduration=1.9522156480000001 podStartE2EDuration="4.368621428s" podCreationTimestamp="2026-01-31 16:42:45 +0000 UTC" firstStartedPulling="2026-01-31 16:42:46.315815656 +0000 UTC m=+814.389984345" lastFinishedPulling="2026-01-31 16:42:48.732221466 +0000 UTC m=+816.806390125" observedRunningTime="2026-01-31 16:42:49.366288938 +0000 UTC m=+817.440457637" watchObservedRunningTime="2026-01-31 16:42:49.368621428 +0000 UTC m=+817.442790137" Jan 31 16:42:51 crc kubenswrapper[4769]: I0131 16:42:51.776772 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576gfxdv"] Jan 31 16:42:51 crc kubenswrapper[4769]: I0131 16:42:51.778844 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576gfxdv" Jan 31 16:42:51 crc kubenswrapper[4769]: I0131 16:42:51.780786 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-x6tzc" Jan 31 16:42:51 crc kubenswrapper[4769]: I0131 16:42:51.787731 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576gfxdv"] Jan 31 16:42:51 crc kubenswrapper[4769]: I0131 16:42:51.919206 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mkl4w\" (UniqueName: \"kubernetes.io/projected/58664432-2fc3-423c-b54a-14d34b96318c-kube-api-access-mkl4w\") pod \"d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576gfxdv\" (UID: \"58664432-2fc3-423c-b54a-14d34b96318c\") " pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576gfxdv" Jan 31 16:42:51 crc kubenswrapper[4769]: I0131 16:42:51.919297 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/58664432-2fc3-423c-b54a-14d34b96318c-util\") pod \"d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576gfxdv\" (UID: \"58664432-2fc3-423c-b54a-14d34b96318c\") " pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576gfxdv" Jan 31 16:42:51 crc kubenswrapper[4769]: I0131 16:42:51.919366 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/58664432-2fc3-423c-b54a-14d34b96318c-bundle\") pod \"d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576gfxdv\" (UID: \"58664432-2fc3-423c-b54a-14d34b96318c\") " pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576gfxdv" Jan 31 16:42:52 crc kubenswrapper[4769]: I0131 16:42:52.020047 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mkl4w\" (UniqueName: \"kubernetes.io/projected/58664432-2fc3-423c-b54a-14d34b96318c-kube-api-access-mkl4w\") pod \"d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576gfxdv\" (UID: \"58664432-2fc3-423c-b54a-14d34b96318c\") " pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576gfxdv" Jan 31 16:42:52 crc kubenswrapper[4769]: I0131 16:42:52.020379 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/58664432-2fc3-423c-b54a-14d34b96318c-util\") pod \"d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576gfxdv\" (UID: \"58664432-2fc3-423c-b54a-14d34b96318c\") " pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576gfxdv" Jan 31 16:42:52 crc kubenswrapper[4769]: I0131 16:42:52.020578 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/58664432-2fc3-423c-b54a-14d34b96318c-bundle\") pod \"d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576gfxdv\" (UID: \"58664432-2fc3-423c-b54a-14d34b96318c\") " pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576gfxdv" Jan 31 16:42:52 crc kubenswrapper[4769]: I0131 16:42:52.020953 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/58664432-2fc3-423c-b54a-14d34b96318c-util\") pod \"d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576gfxdv\" (UID: \"58664432-2fc3-423c-b54a-14d34b96318c\") " pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576gfxdv" Jan 31 16:42:52 crc kubenswrapper[4769]: I0131 16:42:52.020969 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/58664432-2fc3-423c-b54a-14d34b96318c-bundle\") pod \"d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576gfxdv\" (UID: \"58664432-2fc3-423c-b54a-14d34b96318c\") " pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576gfxdv" Jan 31 16:42:52 crc kubenswrapper[4769]: I0131 16:42:52.049247 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mkl4w\" (UniqueName: \"kubernetes.io/projected/58664432-2fc3-423c-b54a-14d34b96318c-kube-api-access-mkl4w\") pod \"d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576gfxdv\" (UID: \"58664432-2fc3-423c-b54a-14d34b96318c\") " pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576gfxdv" Jan 31 16:42:52 crc kubenswrapper[4769]: I0131 16:42:52.104086 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576gfxdv" Jan 31 16:42:52 crc kubenswrapper[4769]: I0131 16:42:52.502891 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576gfxdv"] Jan 31 16:42:52 crc kubenswrapper[4769]: W0131 16:42:52.520659 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod58664432_2fc3_423c_b54a_14d34b96318c.slice/crio-9e1ca6d2da7830f0dc26c6da7ca2edd2af839c2a6b086f521d1b6842ccdeff22 WatchSource:0}: Error finding container 9e1ca6d2da7830f0dc26c6da7ca2edd2af839c2a6b086f521d1b6842ccdeff22: Status 404 returned error can't find the container with id 9e1ca6d2da7830f0dc26c6da7ca2edd2af839c2a6b086f521d1b6842ccdeff22 Jan 31 16:42:53 crc kubenswrapper[4769]: E0131 16:42:53.103458 4769 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod58664432_2fc3_423c_b54a_14d34b96318c.slice/crio-conmon-bb1d7e7862d9366d32379aee3c0183af33477f5579d97717155c384e2bd0cfa0.scope\": RecentStats: unable to find data in memory cache]" Jan 31 16:42:53 crc kubenswrapper[4769]: I0131 16:42:53.376485 4769 generic.go:334] "Generic (PLEG): container finished" podID="58664432-2fc3-423c-b54a-14d34b96318c" containerID="bb1d7e7862d9366d32379aee3c0183af33477f5579d97717155c384e2bd0cfa0" exitCode=0 Jan 31 16:42:53 crc kubenswrapper[4769]: I0131 16:42:53.376559 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576gfxdv" event={"ID":"58664432-2fc3-423c-b54a-14d34b96318c","Type":"ContainerDied","Data":"bb1d7e7862d9366d32379aee3c0183af33477f5579d97717155c384e2bd0cfa0"} Jan 31 16:42:53 crc kubenswrapper[4769]: I0131 16:42:53.376590 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576gfxdv" event={"ID":"58664432-2fc3-423c-b54a-14d34b96318c","Type":"ContainerStarted","Data":"9e1ca6d2da7830f0dc26c6da7ca2edd2af839c2a6b086f521d1b6842ccdeff22"} Jan 31 16:42:54 crc kubenswrapper[4769]: I0131 16:42:54.386102 4769 generic.go:334] "Generic (PLEG): container finished" podID="58664432-2fc3-423c-b54a-14d34b96318c" containerID="31b28b2ecf644886c656f4142c4b7efd22c25d33a2e7e186e7ff59a5bede054b" exitCode=0 Jan 31 16:42:54 crc kubenswrapper[4769]: I0131 16:42:54.386169 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576gfxdv" event={"ID":"58664432-2fc3-423c-b54a-14d34b96318c","Type":"ContainerDied","Data":"31b28b2ecf644886c656f4142c4b7efd22c25d33a2e7e186e7ff59a5bede054b"} Jan 31 16:42:54 crc kubenswrapper[4769]: I0131 16:42:54.915460 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-4x77c"] Jan 31 16:42:54 crc kubenswrapper[4769]: I0131 16:42:54.916527 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-4x77c" Jan 31 16:42:54 crc kubenswrapper[4769]: I0131 16:42:54.932547 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4x77c"] Jan 31 16:42:54 crc kubenswrapper[4769]: I0131 16:42:54.973734 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8mslj\" (UniqueName: \"kubernetes.io/projected/99ffb860-a86b-42ae-827a-0caae9d09043-kube-api-access-8mslj\") pod \"redhat-operators-4x77c\" (UID: \"99ffb860-a86b-42ae-827a-0caae9d09043\") " pod="openshift-marketplace/redhat-operators-4x77c" Jan 31 16:42:54 crc kubenswrapper[4769]: I0131 16:42:54.973799 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/99ffb860-a86b-42ae-827a-0caae9d09043-catalog-content\") pod \"redhat-operators-4x77c\" (UID: \"99ffb860-a86b-42ae-827a-0caae9d09043\") " pod="openshift-marketplace/redhat-operators-4x77c" Jan 31 16:42:54 crc kubenswrapper[4769]: I0131 16:42:54.973865 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/99ffb860-a86b-42ae-827a-0caae9d09043-utilities\") pod \"redhat-operators-4x77c\" (UID: \"99ffb860-a86b-42ae-827a-0caae9d09043\") " pod="openshift-marketplace/redhat-operators-4x77c" Jan 31 16:42:55 crc kubenswrapper[4769]: I0131 16:42:55.075320 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8mslj\" (UniqueName: \"kubernetes.io/projected/99ffb860-a86b-42ae-827a-0caae9d09043-kube-api-access-8mslj\") pod \"redhat-operators-4x77c\" (UID: \"99ffb860-a86b-42ae-827a-0caae9d09043\") " pod="openshift-marketplace/redhat-operators-4x77c" Jan 31 16:42:55 crc kubenswrapper[4769]: I0131 16:42:55.075383 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/99ffb860-a86b-42ae-827a-0caae9d09043-catalog-content\") pod \"redhat-operators-4x77c\" (UID: \"99ffb860-a86b-42ae-827a-0caae9d09043\") " pod="openshift-marketplace/redhat-operators-4x77c" Jan 31 16:42:55 crc kubenswrapper[4769]: I0131 16:42:55.075429 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/99ffb860-a86b-42ae-827a-0caae9d09043-utilities\") pod \"redhat-operators-4x77c\" (UID: \"99ffb860-a86b-42ae-827a-0caae9d09043\") " pod="openshift-marketplace/redhat-operators-4x77c" Jan 31 16:42:55 crc kubenswrapper[4769]: I0131 16:42:55.075884 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/99ffb860-a86b-42ae-827a-0caae9d09043-utilities\") pod \"redhat-operators-4x77c\" (UID: \"99ffb860-a86b-42ae-827a-0caae9d09043\") " pod="openshift-marketplace/redhat-operators-4x77c" Jan 31 16:42:55 crc kubenswrapper[4769]: I0131 16:42:55.075973 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/99ffb860-a86b-42ae-827a-0caae9d09043-catalog-content\") pod \"redhat-operators-4x77c\" (UID: \"99ffb860-a86b-42ae-827a-0caae9d09043\") " pod="openshift-marketplace/redhat-operators-4x77c" Jan 31 16:42:55 crc kubenswrapper[4769]: I0131 16:42:55.103040 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-8mslj\" (UniqueName: \"kubernetes.io/projected/99ffb860-a86b-42ae-827a-0caae9d09043-kube-api-access-8mslj\") pod \"redhat-operators-4x77c\" (UID: \"99ffb860-a86b-42ae-827a-0caae9d09043\") " pod="openshift-marketplace/redhat-operators-4x77c" Jan 31 16:42:55 crc kubenswrapper[4769]: I0131 16:42:55.232211 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4x77c" Jan 31 16:42:55 crc kubenswrapper[4769]: I0131 16:42:55.398935 4769 generic.go:334] "Generic (PLEG): container finished" podID="58664432-2fc3-423c-b54a-14d34b96318c" containerID="0ea3150d03dacbdcd0ff54e5bd91aca76cd4475d2121ee7c0f970c7c98a75ef8" exitCode=0 Jan 31 16:42:55 crc kubenswrapper[4769]: I0131 16:42:55.398975 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576gfxdv" event={"ID":"58664432-2fc3-423c-b54a-14d34b96318c","Type":"ContainerDied","Data":"0ea3150d03dacbdcd0ff54e5bd91aca76cd4475d2121ee7c0f970c7c98a75ef8"} Jan 31 16:42:55 crc kubenswrapper[4769]: I0131 16:42:55.481894 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-wff9s" Jan 31 16:42:55 crc kubenswrapper[4769]: I0131 16:42:55.482338 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-wff9s" Jan 31 16:42:55 crc kubenswrapper[4769]: I0131 16:42:55.531486 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-wff9s" Jan 31 16:42:55 crc kubenswrapper[4769]: I0131 16:42:55.692160 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4x77c"] Jan 31 16:42:56 crc kubenswrapper[4769]: I0131 16:42:56.405023 4769 generic.go:334] "Generic (PLEG): container finished" podID="99ffb860-a86b-42ae-827a-0caae9d09043" containerID="4a4dde229a2a4fbcf6f788bf971fa08b794a2a29108fa0e5a845d76819f27644" exitCode=0 Jan 31 16:42:56 crc kubenswrapper[4769]: I0131 16:42:56.405117 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4x77c" event={"ID":"99ffb860-a86b-42ae-827a-0caae9d09043","Type":"ContainerDied","Data":"4a4dde229a2a4fbcf6f788bf971fa08b794a2a29108fa0e5a845d76819f27644"} Jan 31 16:42:56 crc kubenswrapper[4769]: I0131 16:42:56.405364 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4x77c" event={"ID":"99ffb860-a86b-42ae-827a-0caae9d09043","Type":"ContainerStarted","Data":"e1750d9818f496e39dd5a68802388aedb1e9286d152c69a339a73d2662649315"} Jan 31 16:42:56 crc kubenswrapper[4769]: I0131 16:42:56.451152 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-wff9s" Jan 31 16:42:56 crc kubenswrapper[4769]: I0131 16:42:56.681447 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576gfxdv" Jan 31 16:42:56 crc kubenswrapper[4769]: I0131 16:42:56.807095 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/58664432-2fc3-423c-b54a-14d34b96318c-bundle\") pod \"58664432-2fc3-423c-b54a-14d34b96318c\" (UID: \"58664432-2fc3-423c-b54a-14d34b96318c\") " Jan 31 16:42:56 crc kubenswrapper[4769]: I0131 16:42:56.807191 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/58664432-2fc3-423c-b54a-14d34b96318c-util\") pod \"58664432-2fc3-423c-b54a-14d34b96318c\" (UID: \"58664432-2fc3-423c-b54a-14d34b96318c\") " Jan 31 16:42:56 crc kubenswrapper[4769]: I0131 16:42:56.807218 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mkl4w\" (UniqueName: \"kubernetes.io/projected/58664432-2fc3-423c-b54a-14d34b96318c-kube-api-access-mkl4w\") pod \"58664432-2fc3-423c-b54a-14d34b96318c\" (UID: \"58664432-2fc3-423c-b54a-14d34b96318c\") " Jan 31 16:42:56 crc kubenswrapper[4769]: I0131 16:42:56.812634 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/58664432-2fc3-423c-b54a-14d34b96318c-kube-api-access-mkl4w" (OuterVolumeSpecName: "kube-api-access-mkl4w") pod "58664432-2fc3-423c-b54a-14d34b96318c" (UID: "58664432-2fc3-423c-b54a-14d34b96318c"). InnerVolumeSpecName "kube-api-access-mkl4w". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:42:56 crc kubenswrapper[4769]: I0131 16:42:56.813201 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/58664432-2fc3-423c-b54a-14d34b96318c-bundle" (OuterVolumeSpecName: "bundle") pod "58664432-2fc3-423c-b54a-14d34b96318c" (UID: "58664432-2fc3-423c-b54a-14d34b96318c"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:42:56 crc kubenswrapper[4769]: I0131 16:42:56.820443 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/58664432-2fc3-423c-b54a-14d34b96318c-util" (OuterVolumeSpecName: "util") pod "58664432-2fc3-423c-b54a-14d34b96318c" (UID: "58664432-2fc3-423c-b54a-14d34b96318c"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:42:56 crc kubenswrapper[4769]: I0131 16:42:56.909941 4769 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/58664432-2fc3-423c-b54a-14d34b96318c-util\") on node \"crc\" DevicePath \"\"" Jan 31 16:42:56 crc kubenswrapper[4769]: I0131 16:42:56.910008 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mkl4w\" (UniqueName: \"kubernetes.io/projected/58664432-2fc3-423c-b54a-14d34b96318c-kube-api-access-mkl4w\") on node \"crc\" DevicePath \"\"" Jan 31 16:42:56 crc kubenswrapper[4769]: I0131 16:42:56.910032 4769 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/58664432-2fc3-423c-b54a-14d34b96318c-bundle\") on node \"crc\" DevicePath \"\"" Jan 31 16:42:57 crc kubenswrapper[4769]: I0131 16:42:57.417163 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576gfxdv" event={"ID":"58664432-2fc3-423c-b54a-14d34b96318c","Type":"ContainerDied","Data":"9e1ca6d2da7830f0dc26c6da7ca2edd2af839c2a6b086f521d1b6842ccdeff22"} Jan 31 16:42:57 crc kubenswrapper[4769]: I0131 16:42:57.417728 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9e1ca6d2da7830f0dc26c6da7ca2edd2af839c2a6b086f521d1b6842ccdeff22" Jan 31 16:42:57 crc kubenswrapper[4769]: I0131 16:42:57.417211 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576gfxdv" Jan 31 16:42:57 crc kubenswrapper[4769]: I0131 16:42:57.425925 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4x77c" event={"ID":"99ffb860-a86b-42ae-827a-0caae9d09043","Type":"ContainerStarted","Data":"ab76025b5acf3e4d83ec95492415472ad79a514afdd7aa97016d5679d5e3f3b7"} Jan 31 16:42:58 crc kubenswrapper[4769]: I0131 16:42:58.432723 4769 generic.go:334] "Generic (PLEG): container finished" podID="99ffb860-a86b-42ae-827a-0caae9d09043" containerID="ab76025b5acf3e4d83ec95492415472ad79a514afdd7aa97016d5679d5e3f3b7" exitCode=0 Jan 31 16:42:58 crc kubenswrapper[4769]: I0131 16:42:58.432967 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4x77c" event={"ID":"99ffb860-a86b-42ae-827a-0caae9d09043","Type":"ContainerDied","Data":"ab76025b5acf3e4d83ec95492415472ad79a514afdd7aa97016d5679d5e3f3b7"} Jan 31 16:42:59 crc kubenswrapper[4769]: I0131 16:42:59.311593 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wff9s"] Jan 31 16:42:59 crc kubenswrapper[4769]: I0131 16:42:59.311924 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-wff9s" podUID="a808184b-833a-4f37-9ed7-a086bb424f41" containerName="registry-server" containerID="cri-o://82e9e1b09e8b7f7e64b3393108e6781461b3957473d2a72a9ac5d56ede9acc74" gracePeriod=2 Jan 31 16:42:59 crc kubenswrapper[4769]: I0131 16:42:59.441143 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4x77c" event={"ID":"99ffb860-a86b-42ae-827a-0caae9d09043","Type":"ContainerStarted","Data":"ec01bd28589e76f9acc0d234d7fed5527e60db8aa488775f8e021c00a9fb3380"} Jan 31 16:42:59 crc kubenswrapper[4769]: I0131 16:42:59.443700 4769 generic.go:334] "Generic (PLEG): container finished" 
podID="a808184b-833a-4f37-9ed7-a086bb424f41" containerID="82e9e1b09e8b7f7e64b3393108e6781461b3957473d2a72a9ac5d56ede9acc74" exitCode=0 Jan 31 16:42:59 crc kubenswrapper[4769]: I0131 16:42:59.443751 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wff9s" event={"ID":"a808184b-833a-4f37-9ed7-a086bb424f41","Type":"ContainerDied","Data":"82e9e1b09e8b7f7e64b3393108e6781461b3957473d2a72a9ac5d56ede9acc74"} Jan 31 16:42:59 crc kubenswrapper[4769]: I0131 16:42:59.464045 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-4x77c" podStartSLOduration=3.075682371 podStartE2EDuration="5.464029574s" podCreationTimestamp="2026-01-31 16:42:54 +0000 UTC" firstStartedPulling="2026-01-31 16:42:56.407319432 +0000 UTC m=+824.481488101" lastFinishedPulling="2026-01-31 16:42:58.795666635 +0000 UTC m=+826.869835304" observedRunningTime="2026-01-31 16:42:59.460168495 +0000 UTC m=+827.534337194" watchObservedRunningTime="2026-01-31 16:42:59.464029574 +0000 UTC m=+827.538198233" Jan 31 16:42:59 crc kubenswrapper[4769]: I0131 16:42:59.695050 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wff9s" Jan 31 16:42:59 crc kubenswrapper[4769]: I0131 16:42:59.852047 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a808184b-833a-4f37-9ed7-a086bb424f41-utilities\") pod \"a808184b-833a-4f37-9ed7-a086bb424f41\" (UID: \"a808184b-833a-4f37-9ed7-a086bb424f41\") " Jan 31 16:42:59 crc kubenswrapper[4769]: I0131 16:42:59.852141 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wsf46\" (UniqueName: \"kubernetes.io/projected/a808184b-833a-4f37-9ed7-a086bb424f41-kube-api-access-wsf46\") pod \"a808184b-833a-4f37-9ed7-a086bb424f41\" (UID: \"a808184b-833a-4f37-9ed7-a086bb424f41\") " Jan 31 16:42:59 crc kubenswrapper[4769]: I0131 16:42:59.852183 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a808184b-833a-4f37-9ed7-a086bb424f41-catalog-content\") pod \"a808184b-833a-4f37-9ed7-a086bb424f41\" (UID: \"a808184b-833a-4f37-9ed7-a086bb424f41\") " Jan 31 16:42:59 crc kubenswrapper[4769]: I0131 16:42:59.853637 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a808184b-833a-4f37-9ed7-a086bb424f41-utilities" (OuterVolumeSpecName: "utilities") pod "a808184b-833a-4f37-9ed7-a086bb424f41" (UID: "a808184b-833a-4f37-9ed7-a086bb424f41"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:42:59 crc kubenswrapper[4769]: I0131 16:42:59.865764 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a808184b-833a-4f37-9ed7-a086bb424f41-kube-api-access-wsf46" (OuterVolumeSpecName: "kube-api-access-wsf46") pod "a808184b-833a-4f37-9ed7-a086bb424f41" (UID: "a808184b-833a-4f37-9ed7-a086bb424f41"). InnerVolumeSpecName "kube-api-access-wsf46". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:42:59 crc kubenswrapper[4769]: I0131 16:42:59.907893 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a808184b-833a-4f37-9ed7-a086bb424f41-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a808184b-833a-4f37-9ed7-a086bb424f41" (UID: "a808184b-833a-4f37-9ed7-a086bb424f41"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:42:59 crc kubenswrapper[4769]: I0131 16:42:59.953370 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a808184b-833a-4f37-9ed7-a086bb424f41-utilities\") on node \"crc\" DevicePath \"\"" Jan 31 16:42:59 crc kubenswrapper[4769]: I0131 16:42:59.953392 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wsf46\" (UniqueName: \"kubernetes.io/projected/a808184b-833a-4f37-9ed7-a086bb424f41-kube-api-access-wsf46\") on node \"crc\" DevicePath \"\"" Jan 31 16:42:59 crc kubenswrapper[4769]: I0131 16:42:59.953403 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a808184b-833a-4f37-9ed7-a086bb424f41-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 31 16:43:00 crc kubenswrapper[4769]: I0131 16:43:00.458290 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wff9s" Jan 31 16:43:00 crc kubenswrapper[4769]: I0131 16:43:00.458307 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wff9s" event={"ID":"a808184b-833a-4f37-9ed7-a086bb424f41","Type":"ContainerDied","Data":"836abaaea2648a936877bb249d6779b076c32ff20b926300440730018d42a38e"} Jan 31 16:43:00 crc kubenswrapper[4769]: I0131 16:43:00.458400 4769 scope.go:117] "RemoveContainer" containerID="82e9e1b09e8b7f7e64b3393108e6781461b3957473d2a72a9ac5d56ede9acc74" Jan 31 16:43:00 crc kubenswrapper[4769]: I0131 16:43:00.486707 4769 scope.go:117] "RemoveContainer" containerID="c0eb5367903104536b170ba6773763b708ad42ae701a5fd6bb1a6f96428452f4" Jan 31 16:43:00 crc kubenswrapper[4769]: I0131 16:43:00.506199 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wff9s"] Jan 31 16:43:00 crc kubenswrapper[4769]: I0131 16:43:00.514599 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-wff9s"] Jan 31 16:43:00 crc kubenswrapper[4769]: I0131 16:43:00.523649 4769 scope.go:117] "RemoveContainer" containerID="8db5364e60665dc122fb49dc62c7417ba653959ffe753dbb62c9576579991d2e" Jan 31 16:43:00 crc kubenswrapper[4769]: I0131 16:43:00.716705 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a808184b-833a-4f37-9ed7-a086bb424f41" path="/var/lib/kubelet/pods/a808184b-833a-4f37-9ed7-a086bb424f41/volumes" Jan 31 16:43:05 crc kubenswrapper[4769]: I0131 16:43:05.233465 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-4x77c" Jan 31 16:43:05 crc kubenswrapper[4769]: I0131 16:43:05.234382 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-4x77c" Jan 31 16:43:06 crc kubenswrapper[4769]: I0131 16:43:06.288310 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-4x77c" podUID="99ffb860-a86b-42ae-827a-0caae9d09043" 
containerName="registry-server" probeResult="failure" output=< Jan 31 16:43:06 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Jan 31 16:43:06 crc kubenswrapper[4769]: > Jan 31 16:43:09 crc kubenswrapper[4769]: I0131 16:43:09.305447 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-6c68d645db-8rh4f"] Jan 31 16:43:09 crc kubenswrapper[4769]: E0131 16:43:09.305962 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58664432-2fc3-423c-b54a-14d34b96318c" containerName="util" Jan 31 16:43:09 crc kubenswrapper[4769]: I0131 16:43:09.305976 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="58664432-2fc3-423c-b54a-14d34b96318c" containerName="util" Jan 31 16:43:09 crc kubenswrapper[4769]: E0131 16:43:09.305992 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58664432-2fc3-423c-b54a-14d34b96318c" containerName="extract" Jan 31 16:43:09 crc kubenswrapper[4769]: I0131 16:43:09.306001 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="58664432-2fc3-423c-b54a-14d34b96318c" containerName="extract" Jan 31 16:43:09 crc kubenswrapper[4769]: E0131 16:43:09.306018 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a808184b-833a-4f37-9ed7-a086bb424f41" containerName="registry-server" Jan 31 16:43:09 crc kubenswrapper[4769]: I0131 16:43:09.306027 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="a808184b-833a-4f37-9ed7-a086bb424f41" containerName="registry-server" Jan 31 16:43:09 crc kubenswrapper[4769]: E0131 16:43:09.306047 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58664432-2fc3-423c-b54a-14d34b96318c" containerName="pull" Jan 31 16:43:09 crc kubenswrapper[4769]: I0131 16:43:09.306055 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="58664432-2fc3-423c-b54a-14d34b96318c" containerName="pull" Jan 31 16:43:09 crc kubenswrapper[4769]: E0131 16:43:09.306067 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a808184b-833a-4f37-9ed7-a086bb424f41" containerName="extract-utilities" Jan 31 16:43:09 crc kubenswrapper[4769]: I0131 16:43:09.306077 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="a808184b-833a-4f37-9ed7-a086bb424f41" containerName="extract-utilities" Jan 31 16:43:09 crc kubenswrapper[4769]: E0131 16:43:09.306087 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a808184b-833a-4f37-9ed7-a086bb424f41" containerName="extract-content" Jan 31 16:43:09 crc kubenswrapper[4769]: I0131 16:43:09.306095 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="a808184b-833a-4f37-9ed7-a086bb424f41" containerName="extract-content" Jan 31 16:43:09 crc kubenswrapper[4769]: I0131 16:43:09.306218 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="a808184b-833a-4f37-9ed7-a086bb424f41" containerName="registry-server" Jan 31 16:43:09 crc kubenswrapper[4769]: I0131 16:43:09.306237 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="58664432-2fc3-423c-b54a-14d34b96318c" containerName="extract" Jan 31 16:43:09 crc kubenswrapper[4769]: I0131 16:43:09.306689 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-6c68d645db-8rh4f" Jan 31 16:43:09 crc kubenswrapper[4769]: I0131 16:43:09.308907 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-schb9" Jan 31 16:43:09 crc kubenswrapper[4769]: I0131 16:43:09.309849 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-service-cert" Jan 31 16:43:09 crc kubenswrapper[4769]: I0131 16:43:09.320718 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-6c68d645db-8rh4f"] Jan 31 16:43:09 crc kubenswrapper[4769]: I0131 16:43:09.377082 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lwh8c\" (UniqueName: \"kubernetes.io/projected/3758c511-4d8c-4ebd-a8f6-ae939aa41381-kube-api-access-lwh8c\") pod \"infra-operator-controller-manager-6c68d645db-8rh4f\" (UID: \"3758c511-4d8c-4ebd-a8f6-ae939aa41381\") " pod="openstack-operators/infra-operator-controller-manager-6c68d645db-8rh4f" Jan 31 16:43:09 crc kubenswrapper[4769]: I0131 16:43:09.377193 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/3758c511-4d8c-4ebd-a8f6-ae939aa41381-webhook-cert\") pod \"infra-operator-controller-manager-6c68d645db-8rh4f\" (UID: \"3758c511-4d8c-4ebd-a8f6-ae939aa41381\") " pod="openstack-operators/infra-operator-controller-manager-6c68d645db-8rh4f" Jan 31 16:43:09 crc kubenswrapper[4769]: I0131 16:43:09.377235 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/3758c511-4d8c-4ebd-a8f6-ae939aa41381-apiservice-cert\") pod \"infra-operator-controller-manager-6c68d645db-8rh4f\" (UID: \"3758c511-4d8c-4ebd-a8f6-ae939aa41381\") " pod="openstack-operators/infra-operator-controller-manager-6c68d645db-8rh4f" Jan 31 16:43:09 crc kubenswrapper[4769]: I0131 16:43:09.477709 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lwh8c\" (UniqueName: \"kubernetes.io/projected/3758c511-4d8c-4ebd-a8f6-ae939aa41381-kube-api-access-lwh8c\") pod \"infra-operator-controller-manager-6c68d645db-8rh4f\" (UID: \"3758c511-4d8c-4ebd-a8f6-ae939aa41381\") " pod="openstack-operators/infra-operator-controller-manager-6c68d645db-8rh4f" Jan 31 16:43:09 crc kubenswrapper[4769]: I0131 16:43:09.477764 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/3758c511-4d8c-4ebd-a8f6-ae939aa41381-webhook-cert\") pod \"infra-operator-controller-manager-6c68d645db-8rh4f\" (UID: \"3758c511-4d8c-4ebd-a8f6-ae939aa41381\") " pod="openstack-operators/infra-operator-controller-manager-6c68d645db-8rh4f" Jan 31 16:43:09 crc kubenswrapper[4769]: I0131 16:43:09.477788 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/3758c511-4d8c-4ebd-a8f6-ae939aa41381-apiservice-cert\") pod \"infra-operator-controller-manager-6c68d645db-8rh4f\" (UID: \"3758c511-4d8c-4ebd-a8f6-ae939aa41381\") " pod="openstack-operators/infra-operator-controller-manager-6c68d645db-8rh4f" Jan 31 16:43:09 crc kubenswrapper[4769]: I0131 16:43:09.494103 4769 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/3758c511-4d8c-4ebd-a8f6-ae939aa41381-apiservice-cert\") pod \"infra-operator-controller-manager-6c68d645db-8rh4f\" (UID: \"3758c511-4d8c-4ebd-a8f6-ae939aa41381\") " pod="openstack-operators/infra-operator-controller-manager-6c68d645db-8rh4f" Jan 31 16:43:09 crc kubenswrapper[4769]: I0131 16:43:09.494244 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/3758c511-4d8c-4ebd-a8f6-ae939aa41381-webhook-cert\") pod \"infra-operator-controller-manager-6c68d645db-8rh4f\" (UID: \"3758c511-4d8c-4ebd-a8f6-ae939aa41381\") " pod="openstack-operators/infra-operator-controller-manager-6c68d645db-8rh4f" Jan 31 16:43:09 crc kubenswrapper[4769]: I0131 16:43:09.499834 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lwh8c\" (UniqueName: \"kubernetes.io/projected/3758c511-4d8c-4ebd-a8f6-ae939aa41381-kube-api-access-lwh8c\") pod \"infra-operator-controller-manager-6c68d645db-8rh4f\" (UID: \"3758c511-4d8c-4ebd-a8f6-ae939aa41381\") " pod="openstack-operators/infra-operator-controller-manager-6c68d645db-8rh4f" Jan 31 16:43:09 crc kubenswrapper[4769]: I0131 16:43:09.628067 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-6c68d645db-8rh4f" Jan 31 16:43:10 crc kubenswrapper[4769]: I0131 16:43:10.045840 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-6c68d645db-8rh4f"] Jan 31 16:43:10 crc kubenswrapper[4769]: I0131 16:43:10.523478 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-6c68d645db-8rh4f" event={"ID":"3758c511-4d8c-4ebd-a8f6-ae939aa41381","Type":"ContainerStarted","Data":"6dd8fb0ec56111ef6270ab86aaf4fc1769e0308c03e99e0e96509ceff65835c2"} Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.086545 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/openstack-galera-0"] Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.087977 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/openstack-galera-0" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.096644 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"kube-root-ca.crt" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.096889 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"openstack-scripts" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.097047 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"openshift-service-ca.crt" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.097926 4769 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"galera-openstack-dockercfg-8m5cn" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.098159 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"openstack-config-data" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.125479 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/openstack-galera-1"] Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.126663 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/openstack-galera-1" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.143269 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/openstack-galera-0"] Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.154662 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/openstack-galera-2"] Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.159780 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/openstack-galera-2"] Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.159948 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/openstack-galera-2" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.161408 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/openstack-galera-1"] Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.219687 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/4215cc6b-6982-4db5-bec8-2cf774a3cd59-kolla-config\") pod \"openstack-galera-0\" (UID: \"4215cc6b-6982-4db5-bec8-2cf774a3cd59\") " pod="swift-kuttl-tests/openstack-galera-0" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.219762 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/4215cc6b-6982-4db5-bec8-2cf774a3cd59-config-data-default\") pod \"openstack-galera-0\" (UID: \"4215cc6b-6982-4db5-bec8-2cf774a3cd59\") " pod="swift-kuttl-tests/openstack-galera-0" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.219785 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/4215cc6b-6982-4db5-bec8-2cf774a3cd59-config-data-generated\") pod \"openstack-galera-0\" (UID: \"4215cc6b-6982-4db5-bec8-2cf774a3cd59\") " pod="swift-kuttl-tests/openstack-galera-0" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.219813 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4215cc6b-6982-4db5-bec8-2cf774a3cd59-operator-scripts\") pod \"openstack-galera-0\" (UID: \"4215cc6b-6982-4db5-bec8-2cf774a3cd59\") " pod="swift-kuttl-tests/openstack-galera-0" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.219891 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j2snc\" (UniqueName: \"kubernetes.io/projected/4215cc6b-6982-4db5-bec8-2cf774a3cd59-kube-api-access-j2snc\") pod \"openstack-galera-0\" (UID: \"4215cc6b-6982-4db5-bec8-2cf774a3cd59\") " pod="swift-kuttl-tests/openstack-galera-0" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.219917 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-galera-0\" (UID: \"4215cc6b-6982-4db5-bec8-2cf774a3cd59\") " pod="swift-kuttl-tests/openstack-galera-0" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.321427 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/cc26650a-e98c-4c58-bfea-16ebfc50a1a1-operator-scripts\") pod \"openstack-galera-2\" (UID: \"cc26650a-e98c-4c58-bfea-16ebfc50a1a1\") " pod="swift-kuttl-tests/openstack-galera-2" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.321478 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/4215cc6b-6982-4db5-bec8-2cf774a3cd59-kolla-config\") pod \"openstack-galera-0\" (UID: \"4215cc6b-6982-4db5-bec8-2cf774a3cd59\") " pod="swift-kuttl-tests/openstack-galera-0" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.321507 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-galera-2\" (UID: \"cc26650a-e98c-4c58-bfea-16ebfc50a1a1\") " pod="swift-kuttl-tests/openstack-galera-2" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.321541 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/cc26650a-e98c-4c58-bfea-16ebfc50a1a1-kolla-config\") pod \"openstack-galera-2\" (UID: \"cc26650a-e98c-4c58-bfea-16ebfc50a1a1\") " pod="swift-kuttl-tests/openstack-galera-2" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.321572 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/322a07d1-dd08-4c79-a511-bab71e44d9e9-config-data-default\") pod \"openstack-galera-1\" (UID: \"322a07d1-dd08-4c79-a511-bab71e44d9e9\") " pod="swift-kuttl-tests/openstack-galera-1" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.321611 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-25dst\" (UniqueName: \"kubernetes.io/projected/322a07d1-dd08-4c79-a511-bab71e44d9e9-kube-api-access-25dst\") pod \"openstack-galera-1\" (UID: \"322a07d1-dd08-4c79-a511-bab71e44d9e9\") " pod="swift-kuttl-tests/openstack-galera-1" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.321636 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/322a07d1-dd08-4c79-a511-bab71e44d9e9-kolla-config\") pod \"openstack-galera-1\" (UID: \"322a07d1-dd08-4c79-a511-bab71e44d9e9\") " pod="swift-kuttl-tests/openstack-galera-1" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.321728 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/4215cc6b-6982-4db5-bec8-2cf774a3cd59-config-data-default\") pod \"openstack-galera-0\" (UID: \"4215cc6b-6982-4db5-bec8-2cf774a3cd59\") " pod="swift-kuttl-tests/openstack-galera-0" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.321786 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/4215cc6b-6982-4db5-bec8-2cf774a3cd59-config-data-generated\") pod \"openstack-galera-0\" (UID: \"4215cc6b-6982-4db5-bec8-2cf774a3cd59\") " pod="swift-kuttl-tests/openstack-galera-0" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.321805 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/4215cc6b-6982-4db5-bec8-2cf774a3cd59-operator-scripts\") pod \"openstack-galera-0\" (UID: \"4215cc6b-6982-4db5-bec8-2cf774a3cd59\") " pod="swift-kuttl-tests/openstack-galera-0" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.322334 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/4215cc6b-6982-4db5-bec8-2cf774a3cd59-config-data-generated\") pod \"openstack-galera-0\" (UID: \"4215cc6b-6982-4db5-bec8-2cf774a3cd59\") " pod="swift-kuttl-tests/openstack-galera-0" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.322385 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/4215cc6b-6982-4db5-bec8-2cf774a3cd59-kolla-config\") pod \"openstack-galera-0\" (UID: \"4215cc6b-6982-4db5-bec8-2cf774a3cd59\") " pod="swift-kuttl-tests/openstack-galera-0" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.322438 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/4215cc6b-6982-4db5-bec8-2cf774a3cd59-config-data-default\") pod \"openstack-galera-0\" (UID: \"4215cc6b-6982-4db5-bec8-2cf774a3cd59\") " pod="swift-kuttl-tests/openstack-galera-0" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.323130 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4215cc6b-6982-4db5-bec8-2cf774a3cd59-operator-scripts\") pod \"openstack-galera-0\" (UID: \"4215cc6b-6982-4db5-bec8-2cf774a3cd59\") " pod="swift-kuttl-tests/openstack-galera-0" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.323182 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/322a07d1-dd08-4c79-a511-bab71e44d9e9-config-data-generated\") pod \"openstack-galera-1\" (UID: \"322a07d1-dd08-4c79-a511-bab71e44d9e9\") " pod="swift-kuttl-tests/openstack-galera-1" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.323359 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-galera-1\" (UID: \"322a07d1-dd08-4c79-a511-bab71e44d9e9\") " pod="swift-kuttl-tests/openstack-galera-1" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.323383 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j2snc\" (UniqueName: \"kubernetes.io/projected/4215cc6b-6982-4db5-bec8-2cf774a3cd59-kube-api-access-j2snc\") pod \"openstack-galera-0\" (UID: \"4215cc6b-6982-4db5-bec8-2cf774a3cd59\") " pod="swift-kuttl-tests/openstack-galera-0" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.323399 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/cc26650a-e98c-4c58-bfea-16ebfc50a1a1-config-data-generated\") pod \"openstack-galera-2\" (UID: \"cc26650a-e98c-4c58-bfea-16ebfc50a1a1\") " pod="swift-kuttl-tests/openstack-galera-2" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.323422 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: 
\"kubernetes.io/configmap/cc26650a-e98c-4c58-bfea-16ebfc50a1a1-config-data-default\") pod \"openstack-galera-2\" (UID: \"cc26650a-e98c-4c58-bfea-16ebfc50a1a1\") " pod="swift-kuttl-tests/openstack-galera-2" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.323440 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-galera-0\" (UID: \"4215cc6b-6982-4db5-bec8-2cf774a3cd59\") " pod="swift-kuttl-tests/openstack-galera-0" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.323455 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d45bz\" (UniqueName: \"kubernetes.io/projected/cc26650a-e98c-4c58-bfea-16ebfc50a1a1-kube-api-access-d45bz\") pod \"openstack-galera-2\" (UID: \"cc26650a-e98c-4c58-bfea-16ebfc50a1a1\") " pod="swift-kuttl-tests/openstack-galera-2" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.323476 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/322a07d1-dd08-4c79-a511-bab71e44d9e9-operator-scripts\") pod \"openstack-galera-1\" (UID: \"322a07d1-dd08-4c79-a511-bab71e44d9e9\") " pod="swift-kuttl-tests/openstack-galera-1" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.323788 4769 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-galera-0\" (UID: \"4215cc6b-6982-4db5-bec8-2cf774a3cd59\") device mount path \"/mnt/openstack/pv10\"" pod="swift-kuttl-tests/openstack-galera-0" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.341736 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-galera-0\" (UID: \"4215cc6b-6982-4db5-bec8-2cf774a3cd59\") " pod="swift-kuttl-tests/openstack-galera-0" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.350373 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j2snc\" (UniqueName: \"kubernetes.io/projected/4215cc6b-6982-4db5-bec8-2cf774a3cd59-kube-api-access-j2snc\") pod \"openstack-galera-0\" (UID: \"4215cc6b-6982-4db5-bec8-2cf774a3cd59\") " pod="swift-kuttl-tests/openstack-galera-0" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.409231 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/openstack-galera-0" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.424454 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/322a07d1-dd08-4c79-a511-bab71e44d9e9-config-data-generated\") pod \"openstack-galera-1\" (UID: \"322a07d1-dd08-4c79-a511-bab71e44d9e9\") " pod="swift-kuttl-tests/openstack-galera-1" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.424560 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-galera-1\" (UID: \"322a07d1-dd08-4c79-a511-bab71e44d9e9\") " pod="swift-kuttl-tests/openstack-galera-1" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.424595 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/cc26650a-e98c-4c58-bfea-16ebfc50a1a1-config-data-generated\") pod \"openstack-galera-2\" (UID: \"cc26650a-e98c-4c58-bfea-16ebfc50a1a1\") " pod="swift-kuttl-tests/openstack-galera-2" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.424627 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/cc26650a-e98c-4c58-bfea-16ebfc50a1a1-config-data-default\") pod \"openstack-galera-2\" (UID: \"cc26650a-e98c-4c58-bfea-16ebfc50a1a1\") " pod="swift-kuttl-tests/openstack-galera-2" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.424668 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d45bz\" (UniqueName: \"kubernetes.io/projected/cc26650a-e98c-4c58-bfea-16ebfc50a1a1-kube-api-access-d45bz\") pod \"openstack-galera-2\" (UID: \"cc26650a-e98c-4c58-bfea-16ebfc50a1a1\") " pod="swift-kuttl-tests/openstack-galera-2" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.424695 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/322a07d1-dd08-4c79-a511-bab71e44d9e9-operator-scripts\") pod \"openstack-galera-1\" (UID: \"322a07d1-dd08-4c79-a511-bab71e44d9e9\") " pod="swift-kuttl-tests/openstack-galera-1" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.424742 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc26650a-e98c-4c58-bfea-16ebfc50a1a1-operator-scripts\") pod \"openstack-galera-2\" (UID: \"cc26650a-e98c-4c58-bfea-16ebfc50a1a1\") " pod="swift-kuttl-tests/openstack-galera-2" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.424776 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-galera-2\" (UID: \"cc26650a-e98c-4c58-bfea-16ebfc50a1a1\") " pod="swift-kuttl-tests/openstack-galera-2" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.424802 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/cc26650a-e98c-4c58-bfea-16ebfc50a1a1-kolla-config\") pod \"openstack-galera-2\" (UID: \"cc26650a-e98c-4c58-bfea-16ebfc50a1a1\") " pod="swift-kuttl-tests/openstack-galera-2" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.424822 4769 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/322a07d1-dd08-4c79-a511-bab71e44d9e9-config-data-default\") pod \"openstack-galera-1\" (UID: \"322a07d1-dd08-4c79-a511-bab71e44d9e9\") " pod="swift-kuttl-tests/openstack-galera-1" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.424856 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-25dst\" (UniqueName: \"kubernetes.io/projected/322a07d1-dd08-4c79-a511-bab71e44d9e9-kube-api-access-25dst\") pod \"openstack-galera-1\" (UID: \"322a07d1-dd08-4c79-a511-bab71e44d9e9\") " pod="swift-kuttl-tests/openstack-galera-1" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.424881 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/322a07d1-dd08-4c79-a511-bab71e44d9e9-kolla-config\") pod \"openstack-galera-1\" (UID: \"322a07d1-dd08-4c79-a511-bab71e44d9e9\") " pod="swift-kuttl-tests/openstack-galera-1" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.425277 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/322a07d1-dd08-4c79-a511-bab71e44d9e9-config-data-generated\") pod \"openstack-galera-1\" (UID: \"322a07d1-dd08-4c79-a511-bab71e44d9e9\") " pod="swift-kuttl-tests/openstack-galera-1" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.425666 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/322a07d1-dd08-4c79-a511-bab71e44d9e9-kolla-config\") pod \"openstack-galera-1\" (UID: \"322a07d1-dd08-4c79-a511-bab71e44d9e9\") " pod="swift-kuttl-tests/openstack-galera-1" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.427200 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/322a07d1-dd08-4c79-a511-bab71e44d9e9-operator-scripts\") pod \"openstack-galera-1\" (UID: \"322a07d1-dd08-4c79-a511-bab71e44d9e9\") " pod="swift-kuttl-tests/openstack-galera-1" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.427375 4769 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-galera-1\" (UID: \"322a07d1-dd08-4c79-a511-bab71e44d9e9\") device mount path \"/mnt/openstack/pv02\"" pod="swift-kuttl-tests/openstack-galera-1" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.427401 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc26650a-e98c-4c58-bfea-16ebfc50a1a1-operator-scripts\") pod \"openstack-galera-2\" (UID: \"cc26650a-e98c-4c58-bfea-16ebfc50a1a1\") " pod="swift-kuttl-tests/openstack-galera-2" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.427555 4769 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-galera-2\" (UID: \"cc26650a-e98c-4c58-bfea-16ebfc50a1a1\") device mount path \"/mnt/openstack/pv03\"" pod="swift-kuttl-tests/openstack-galera-2" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.428288 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: 
\"kubernetes.io/configmap/cc26650a-e98c-4c58-bfea-16ebfc50a1a1-config-data-default\") pod \"openstack-galera-2\" (UID: \"cc26650a-e98c-4c58-bfea-16ebfc50a1a1\") " pod="swift-kuttl-tests/openstack-galera-2" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.428505 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/cc26650a-e98c-4c58-bfea-16ebfc50a1a1-config-data-generated\") pod \"openstack-galera-2\" (UID: \"cc26650a-e98c-4c58-bfea-16ebfc50a1a1\") " pod="swift-kuttl-tests/openstack-galera-2" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.429042 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/322a07d1-dd08-4c79-a511-bab71e44d9e9-config-data-default\") pod \"openstack-galera-1\" (UID: \"322a07d1-dd08-4c79-a511-bab71e44d9e9\") " pod="swift-kuttl-tests/openstack-galera-1" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.429820 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/cc26650a-e98c-4c58-bfea-16ebfc50a1a1-kolla-config\") pod \"openstack-galera-2\" (UID: \"cc26650a-e98c-4c58-bfea-16ebfc50a1a1\") " pod="swift-kuttl-tests/openstack-galera-2" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.447777 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-25dst\" (UniqueName: \"kubernetes.io/projected/322a07d1-dd08-4c79-a511-bab71e44d9e9-kube-api-access-25dst\") pod \"openstack-galera-1\" (UID: \"322a07d1-dd08-4c79-a511-bab71e44d9e9\") " pod="swift-kuttl-tests/openstack-galera-1" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.448053 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-galera-2\" (UID: \"cc26650a-e98c-4c58-bfea-16ebfc50a1a1\") " pod="swift-kuttl-tests/openstack-galera-2" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.450162 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d45bz\" (UniqueName: \"kubernetes.io/projected/cc26650a-e98c-4c58-bfea-16ebfc50a1a1-kube-api-access-d45bz\") pod \"openstack-galera-2\" (UID: \"cc26650a-e98c-4c58-bfea-16ebfc50a1a1\") " pod="swift-kuttl-tests/openstack-galera-2" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.454354 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-galera-1\" (UID: \"322a07d1-dd08-4c79-a511-bab71e44d9e9\") " pod="swift-kuttl-tests/openstack-galera-1" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.478005 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/openstack-galera-2" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.564558 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-6c68d645db-8rh4f" event={"ID":"3758c511-4d8c-4ebd-a8f6-ae939aa41381","Type":"ContainerStarted","Data":"d1b0471dc3b4be9ec798403d3d96bbfc3ea284787d588d556229cf7fa81186a6"} Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.565775 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-6c68d645db-8rh4f" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.592465 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-6c68d645db-8rh4f" podStartSLOduration=1.6872854830000001 podStartE2EDuration="3.592442474s" podCreationTimestamp="2026-01-31 16:43:09 +0000 UTC" firstStartedPulling="2026-01-31 16:43:10.056578928 +0000 UTC m=+838.130747617" lastFinishedPulling="2026-01-31 16:43:11.961735939 +0000 UTC m=+840.035904608" observedRunningTime="2026-01-31 16:43:12.592374203 +0000 UTC m=+840.666542862" watchObservedRunningTime="2026-01-31 16:43:12.592442474 +0000 UTC m=+840.666611143" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.634246 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/openstack-galera-0"] Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.754098 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/openstack-galera-1" Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.785269 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/openstack-galera-2"] Jan 31 16:43:12 crc kubenswrapper[4769]: I0131 16:43:12.953053 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/openstack-galera-1"] Jan 31 16:43:12 crc kubenswrapper[4769]: W0131 16:43:12.957737 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod322a07d1_dd08_4c79_a511_bab71e44d9e9.slice/crio-9f87e5d1213464e0a20b009fe8d6e5b11916012569dfa618d1a3b5c1b415cbac WatchSource:0}: Error finding container 9f87e5d1213464e0a20b009fe8d6e5b11916012569dfa618d1a3b5c1b415cbac: Status 404 returned error can't find the container with id 9f87e5d1213464e0a20b009fe8d6e5b11916012569dfa618d1a3b5c1b415cbac Jan 31 16:43:13 crc kubenswrapper[4769]: I0131 16:43:13.571921 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/openstack-galera-1" event={"ID":"322a07d1-dd08-4c79-a511-bab71e44d9e9","Type":"ContainerStarted","Data":"9f87e5d1213464e0a20b009fe8d6e5b11916012569dfa618d1a3b5c1b415cbac"} Jan 31 16:43:13 crc kubenswrapper[4769]: I0131 16:43:13.573594 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/openstack-galera-2" event={"ID":"cc26650a-e98c-4c58-bfea-16ebfc50a1a1","Type":"ContainerStarted","Data":"26d32efc3de3c4e79eedfd1dfd31fca789f62601ae3b87c8f6040db21fb3696c"} Jan 31 16:43:13 crc kubenswrapper[4769]: I0131 16:43:13.575738 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/openstack-galera-0" event={"ID":"4215cc6b-6982-4db5-bec8-2cf774a3cd59","Type":"ContainerStarted","Data":"890b280c18f5ab07a1aa40546f7a237538a2b0dfb593d8cf960ac293b72999e5"} Jan 31 16:43:15 crc kubenswrapper[4769]: I0131 16:43:15.622779 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="started" pod="openshift-marketplace/redhat-operators-4x77c" Jan 31 16:43:15 crc kubenswrapper[4769]: I0131 16:43:15.678376 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-4x77c" Jan 31 16:43:19 crc kubenswrapper[4769]: I0131 16:43:19.307442 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4x77c"] Jan 31 16:43:19 crc kubenswrapper[4769]: I0131 16:43:19.308119 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-4x77c" podUID="99ffb860-a86b-42ae-827a-0caae9d09043" containerName="registry-server" containerID="cri-o://ec01bd28589e76f9acc0d234d7fed5527e60db8aa488775f8e021c00a9fb3380" gracePeriod=2 Jan 31 16:43:19 crc kubenswrapper[4769]: I0131 16:43:19.634851 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-6c68d645db-8rh4f" Jan 31 16:43:19 crc kubenswrapper[4769]: I0131 16:43:19.805019 4769 generic.go:334] "Generic (PLEG): container finished" podID="99ffb860-a86b-42ae-827a-0caae9d09043" containerID="ec01bd28589e76f9acc0d234d7fed5527e60db8aa488775f8e021c00a9fb3380" exitCode=0 Jan 31 16:43:19 crc kubenswrapper[4769]: I0131 16:43:19.805065 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4x77c" event={"ID":"99ffb860-a86b-42ae-827a-0caae9d09043","Type":"ContainerDied","Data":"ec01bd28589e76f9acc0d234d7fed5527e60db8aa488775f8e021c00a9fb3380"} Jan 31 16:43:23 crc kubenswrapper[4769]: I0131 16:43:23.829423 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4x77c" event={"ID":"99ffb860-a86b-42ae-827a-0caae9d09043","Type":"ContainerDied","Data":"e1750d9818f496e39dd5a68802388aedb1e9286d152c69a339a73d2662649315"} Jan 31 16:43:23 crc kubenswrapper[4769]: I0131 16:43:23.830063 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e1750d9818f496e39dd5a68802388aedb1e9286d152c69a339a73d2662649315" Jan 31 16:43:23 crc kubenswrapper[4769]: I0131 16:43:23.829967 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-4x77c" Jan 31 16:43:23 crc kubenswrapper[4769]: I0131 16:43:23.879946 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/memcached-0"] Jan 31 16:43:23 crc kubenswrapper[4769]: E0131 16:43:23.880158 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="99ffb860-a86b-42ae-827a-0caae9d09043" containerName="registry-server" Jan 31 16:43:23 crc kubenswrapper[4769]: I0131 16:43:23.880169 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="99ffb860-a86b-42ae-827a-0caae9d09043" containerName="registry-server" Jan 31 16:43:23 crc kubenswrapper[4769]: E0131 16:43:23.880181 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="99ffb860-a86b-42ae-827a-0caae9d09043" containerName="extract-content" Jan 31 16:43:23 crc kubenswrapper[4769]: I0131 16:43:23.880188 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="99ffb860-a86b-42ae-827a-0caae9d09043" containerName="extract-content" Jan 31 16:43:23 crc kubenswrapper[4769]: E0131 16:43:23.880197 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="99ffb860-a86b-42ae-827a-0caae9d09043" containerName="extract-utilities" Jan 31 16:43:23 crc kubenswrapper[4769]: I0131 16:43:23.880203 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="99ffb860-a86b-42ae-827a-0caae9d09043" containerName="extract-utilities" Jan 31 16:43:23 crc kubenswrapper[4769]: I0131 16:43:23.880321 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="99ffb860-a86b-42ae-827a-0caae9d09043" containerName="registry-server" Jan 31 16:43:23 crc kubenswrapper[4769]: I0131 16:43:23.880674 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/memcached-0" Jan 31 16:43:23 crc kubenswrapper[4769]: I0131 16:43:23.886881 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"memcached-config-data" Jan 31 16:43:23 crc kubenswrapper[4769]: I0131 16:43:23.886950 4769 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"memcached-memcached-dockercfg-khpzh" Jan 31 16:43:23 crc kubenswrapper[4769]: I0131 16:43:23.909332 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/memcached-0"] Jan 31 16:43:23 crc kubenswrapper[4769]: I0131 16:43:23.984099 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8mslj\" (UniqueName: \"kubernetes.io/projected/99ffb860-a86b-42ae-827a-0caae9d09043-kube-api-access-8mslj\") pod \"99ffb860-a86b-42ae-827a-0caae9d09043\" (UID: \"99ffb860-a86b-42ae-827a-0caae9d09043\") " Jan 31 16:43:23 crc kubenswrapper[4769]: I0131 16:43:23.984250 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/99ffb860-a86b-42ae-827a-0caae9d09043-utilities\") pod \"99ffb860-a86b-42ae-827a-0caae9d09043\" (UID: \"99ffb860-a86b-42ae-827a-0caae9d09043\") " Jan 31 16:43:23 crc kubenswrapper[4769]: I0131 16:43:23.984365 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/99ffb860-a86b-42ae-827a-0caae9d09043-catalog-content\") pod \"99ffb860-a86b-42ae-827a-0caae9d09043\" (UID: \"99ffb860-a86b-42ae-827a-0caae9d09043\") " Jan 31 16:43:23 crc kubenswrapper[4769]: I0131 16:43:23.984616 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"config-data\" (UniqueName: \"kubernetes.io/configmap/99d531b0-8a9a-4568-801f-d4423fd63af5-config-data\") pod \"memcached-0\" (UID: \"99d531b0-8a9a-4568-801f-d4423fd63af5\") " pod="swift-kuttl-tests/memcached-0" Jan 31 16:43:23 crc kubenswrapper[4769]: I0131 16:43:23.984672 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jd8z9\" (UniqueName: \"kubernetes.io/projected/99d531b0-8a9a-4568-801f-d4423fd63af5-kube-api-access-jd8z9\") pod \"memcached-0\" (UID: \"99d531b0-8a9a-4568-801f-d4423fd63af5\") " pod="swift-kuttl-tests/memcached-0" Jan 31 16:43:23 crc kubenswrapper[4769]: I0131 16:43:23.984772 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/99d531b0-8a9a-4568-801f-d4423fd63af5-kolla-config\") pod \"memcached-0\" (UID: \"99d531b0-8a9a-4568-801f-d4423fd63af5\") " pod="swift-kuttl-tests/memcached-0" Jan 31 16:43:24 crc kubenswrapper[4769]: I0131 16:43:24.003813 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/99ffb860-a86b-42ae-827a-0caae9d09043-kube-api-access-8mslj" (OuterVolumeSpecName: "kube-api-access-8mslj") pod "99ffb860-a86b-42ae-827a-0caae9d09043" (UID: "99ffb860-a86b-42ae-827a-0caae9d09043"). InnerVolumeSpecName "kube-api-access-8mslj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:43:24 crc kubenswrapper[4769]: I0131 16:43:24.023295 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/99ffb860-a86b-42ae-827a-0caae9d09043-utilities" (OuterVolumeSpecName: "utilities") pod "99ffb860-a86b-42ae-827a-0caae9d09043" (UID: "99ffb860-a86b-42ae-827a-0caae9d09043"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:43:24 crc kubenswrapper[4769]: I0131 16:43:24.086314 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/99d531b0-8a9a-4568-801f-d4423fd63af5-kolla-config\") pod \"memcached-0\" (UID: \"99d531b0-8a9a-4568-801f-d4423fd63af5\") " pod="swift-kuttl-tests/memcached-0" Jan 31 16:43:24 crc kubenswrapper[4769]: I0131 16:43:24.086382 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/99d531b0-8a9a-4568-801f-d4423fd63af5-config-data\") pod \"memcached-0\" (UID: \"99d531b0-8a9a-4568-801f-d4423fd63af5\") " pod="swift-kuttl-tests/memcached-0" Jan 31 16:43:24 crc kubenswrapper[4769]: I0131 16:43:24.086419 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jd8z9\" (UniqueName: \"kubernetes.io/projected/99d531b0-8a9a-4568-801f-d4423fd63af5-kube-api-access-jd8z9\") pod \"memcached-0\" (UID: \"99d531b0-8a9a-4568-801f-d4423fd63af5\") " pod="swift-kuttl-tests/memcached-0" Jan 31 16:43:24 crc kubenswrapper[4769]: I0131 16:43:24.086458 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8mslj\" (UniqueName: \"kubernetes.io/projected/99ffb860-a86b-42ae-827a-0caae9d09043-kube-api-access-8mslj\") on node \"crc\" DevicePath \"\"" Jan 31 16:43:24 crc kubenswrapper[4769]: I0131 16:43:24.086472 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/99ffb860-a86b-42ae-827a-0caae9d09043-utilities\") on node \"crc\" DevicePath \"\"" Jan 31 16:43:24 crc kubenswrapper[4769]: I0131 16:43:24.087448 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/99d531b0-8a9a-4568-801f-d4423fd63af5-kolla-config\") pod \"memcached-0\" (UID: \"99d531b0-8a9a-4568-801f-d4423fd63af5\") " pod="swift-kuttl-tests/memcached-0" Jan 31 16:43:24 crc kubenswrapper[4769]: I0131 16:43:24.087930 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/99d531b0-8a9a-4568-801f-d4423fd63af5-config-data\") pod \"memcached-0\" (UID: \"99d531b0-8a9a-4568-801f-d4423fd63af5\") " pod="swift-kuttl-tests/memcached-0" Jan 31 16:43:24 crc kubenswrapper[4769]: I0131 16:43:24.123470 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jd8z9\" (UniqueName: \"kubernetes.io/projected/99d531b0-8a9a-4568-801f-d4423fd63af5-kube-api-access-jd8z9\") pod \"memcached-0\" (UID: \"99d531b0-8a9a-4568-801f-d4423fd63af5\") " pod="swift-kuttl-tests/memcached-0" Jan 31 16:43:24 crc kubenswrapper[4769]: I0131 16:43:24.178975 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/99ffb860-a86b-42ae-827a-0caae9d09043-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "99ffb860-a86b-42ae-827a-0caae9d09043" (UID: "99ffb860-a86b-42ae-827a-0caae9d09043"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:43:24 crc kubenswrapper[4769]: I0131 16:43:24.188766 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/99ffb860-a86b-42ae-827a-0caae9d09043-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 31 16:43:24 crc kubenswrapper[4769]: I0131 16:43:24.206009 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/memcached-0" Jan 31 16:43:24 crc kubenswrapper[4769]: I0131 16:43:24.469623 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/memcached-0"] Jan 31 16:43:24 crc kubenswrapper[4769]: W0131 16:43:24.476243 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod99d531b0_8a9a_4568_801f_d4423fd63af5.slice/crio-44b240d43451e70e54ed8ce56ca7986bf9ce736a04a2f4b8c04a6066506d2736 WatchSource:0}: Error finding container 44b240d43451e70e54ed8ce56ca7986bf9ce736a04a2f4b8c04a6066506d2736: Status 404 returned error can't find the container with id 44b240d43451e70e54ed8ce56ca7986bf9ce736a04a2f4b8c04a6066506d2736 Jan 31 16:43:24 crc kubenswrapper[4769]: I0131 16:43:24.838694 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/openstack-galera-2" event={"ID":"cc26650a-e98c-4c58-bfea-16ebfc50a1a1","Type":"ContainerStarted","Data":"0c592e0a6ba1dc368c63580bb906a9d93b23548160e26a8f5690549329d60325"} Jan 31 16:43:24 crc kubenswrapper[4769]: I0131 16:43:24.840664 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/openstack-galera-0" event={"ID":"4215cc6b-6982-4db5-bec8-2cf774a3cd59","Type":"ContainerStarted","Data":"68bcb7a6a14d3e036195e5c3c7a77e7f1b3026fca0d9bda292ffb52c896bc7f1"} Jan 31 16:43:24 crc kubenswrapper[4769]: I0131 16:43:24.842159 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/openstack-galera-1" event={"ID":"322a07d1-dd08-4c79-a511-bab71e44d9e9","Type":"ContainerStarted","Data":"9c71887e0006b39b07be9be0a84776494326456056307c01ae6ab12842f67b61"} Jan 31 16:43:24 crc kubenswrapper[4769]: I0131 16:43:24.843636 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-4x77c" Jan 31 16:43:24 crc kubenswrapper[4769]: I0131 16:43:24.844213 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/memcached-0" event={"ID":"99d531b0-8a9a-4568-801f-d4423fd63af5","Type":"ContainerStarted","Data":"44b240d43451e70e54ed8ce56ca7986bf9ce736a04a2f4b8c04a6066506d2736"} Jan 31 16:43:24 crc kubenswrapper[4769]: I0131 16:43:24.885556 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4x77c"] Jan 31 16:43:24 crc kubenswrapper[4769]: I0131 16:43:24.887588 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-4x77c"] Jan 31 16:43:26 crc kubenswrapper[4769]: I0131 16:43:26.720782 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="99ffb860-a86b-42ae-827a-0caae9d09043" path="/var/lib/kubelet/pods/99ffb860-a86b-42ae-827a-0caae9d09043/volumes" Jan 31 16:43:27 crc kubenswrapper[4769]: I0131 16:43:27.864176 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/memcached-0" event={"ID":"99d531b0-8a9a-4568-801f-d4423fd63af5","Type":"ContainerStarted","Data":"f5ca7c0a39cce978849c29498504c18a26c834d854721eb94e55a8f3afa3a4be"} Jan 31 16:43:27 crc kubenswrapper[4769]: I0131 16:43:27.865781 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/memcached-0" Jan 31 16:43:27 crc kubenswrapper[4769]: I0131 16:43:27.885665 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/memcached-0" podStartSLOduration=2.183336012 podStartE2EDuration="4.885648152s" podCreationTimestamp="2026-01-31 16:43:23 +0000 UTC" firstStartedPulling="2026-01-31 16:43:24.47898701 +0000 UTC m=+852.553155679" lastFinishedPulling="2026-01-31 16:43:27.18129915 +0000 UTC m=+855.255467819" observedRunningTime="2026-01-31 16:43:27.882364225 +0000 UTC m=+855.956532894" watchObservedRunningTime="2026-01-31 16:43:27.885648152 +0000 UTC m=+855.959816821" Jan 31 16:43:28 crc kubenswrapper[4769]: I0131 16:43:28.872467 4769 generic.go:334] "Generic (PLEG): container finished" podID="322a07d1-dd08-4c79-a511-bab71e44d9e9" containerID="9c71887e0006b39b07be9be0a84776494326456056307c01ae6ab12842f67b61" exitCode=0 Jan 31 16:43:28 crc kubenswrapper[4769]: I0131 16:43:28.872549 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/openstack-galera-1" event={"ID":"322a07d1-dd08-4c79-a511-bab71e44d9e9","Type":"ContainerDied","Data":"9c71887e0006b39b07be9be0a84776494326456056307c01ae6ab12842f67b61"} Jan 31 16:43:28 crc kubenswrapper[4769]: I0131 16:43:28.874704 4769 generic.go:334] "Generic (PLEG): container finished" podID="cc26650a-e98c-4c58-bfea-16ebfc50a1a1" containerID="0c592e0a6ba1dc368c63580bb906a9d93b23548160e26a8f5690549329d60325" exitCode=0 Jan 31 16:43:28 crc kubenswrapper[4769]: I0131 16:43:28.874777 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/openstack-galera-2" event={"ID":"cc26650a-e98c-4c58-bfea-16ebfc50a1a1","Type":"ContainerDied","Data":"0c592e0a6ba1dc368c63580bb906a9d93b23548160e26a8f5690549329d60325"} Jan 31 16:43:28 crc kubenswrapper[4769]: I0131 16:43:28.877306 4769 generic.go:334] "Generic (PLEG): container finished" podID="4215cc6b-6982-4db5-bec8-2cf774a3cd59" containerID="68bcb7a6a14d3e036195e5c3c7a77e7f1b3026fca0d9bda292ffb52c896bc7f1" exitCode=0 Jan 31 16:43:28 crc kubenswrapper[4769]: I0131 16:43:28.877406 4769 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="swift-kuttl-tests/openstack-galera-0" event={"ID":"4215cc6b-6982-4db5-bec8-2cf774a3cd59","Type":"ContainerDied","Data":"68bcb7a6a14d3e036195e5c3c7a77e7f1b3026fca0d9bda292ffb52c896bc7f1"} Jan 31 16:43:29 crc kubenswrapper[4769]: I0131 16:43:29.889437 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/openstack-galera-1" event={"ID":"322a07d1-dd08-4c79-a511-bab71e44d9e9","Type":"ContainerStarted","Data":"f9877cf12365859ac0c555b6daf1f78684adbead40b9fcd042d8d9d6c932a504"} Jan 31 16:43:29 crc kubenswrapper[4769]: I0131 16:43:29.893610 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/openstack-galera-2" event={"ID":"cc26650a-e98c-4c58-bfea-16ebfc50a1a1","Type":"ContainerStarted","Data":"17adbbdf0ae9b9b5e70658bdac1652632fe2ce12a88f1c97dbae88054f908851"} Jan 31 16:43:29 crc kubenswrapper[4769]: I0131 16:43:29.896795 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/openstack-galera-0" event={"ID":"4215cc6b-6982-4db5-bec8-2cf774a3cd59","Type":"ContainerStarted","Data":"aefc49dcce68f6bccdfe753bb77ddaa41775ce3aec38d3e551987f4118cc74e8"} Jan 31 16:43:29 crc kubenswrapper[4769]: I0131 16:43:29.922197 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/openstack-galera-1" podStartSLOduration=7.848616793 podStartE2EDuration="18.922163269s" podCreationTimestamp="2026-01-31 16:43:11 +0000 UTC" firstStartedPulling="2026-01-31 16:43:12.962882652 +0000 UTC m=+841.037051321" lastFinishedPulling="2026-01-31 16:43:24.036429128 +0000 UTC m=+852.110597797" observedRunningTime="2026-01-31 16:43:29.913895538 +0000 UTC m=+857.988064277" watchObservedRunningTime="2026-01-31 16:43:29.922163269 +0000 UTC m=+857.996331978" Jan 31 16:43:29 crc kubenswrapper[4769]: I0131 16:43:29.936273 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/openstack-galera-0" podStartSLOduration=7.611215392 podStartE2EDuration="18.936248764s" podCreationTimestamp="2026-01-31 16:43:11 +0000 UTC" firstStartedPulling="2026-01-31 16:43:12.684701865 +0000 UTC m=+840.758870534" lastFinishedPulling="2026-01-31 16:43:24.009735237 +0000 UTC m=+852.083903906" observedRunningTime="2026-01-31 16:43:29.935126464 +0000 UTC m=+858.009295133" watchObservedRunningTime="2026-01-31 16:43:29.936248764 +0000 UTC m=+858.010417463" Jan 31 16:43:29 crc kubenswrapper[4769]: I0131 16:43:29.960233 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/openstack-galera-2" podStartSLOduration=7.651700498 podStartE2EDuration="18.960206713s" podCreationTimestamp="2026-01-31 16:43:11 +0000 UTC" firstStartedPulling="2026-01-31 16:43:12.79902465 +0000 UTC m=+840.873193319" lastFinishedPulling="2026-01-31 16:43:24.107530865 +0000 UTC m=+852.181699534" observedRunningTime="2026-01-31 16:43:29.9563765 +0000 UTC m=+858.030545189" watchObservedRunningTime="2026-01-31 16:43:29.960206713 +0000 UTC m=+858.034375422" Jan 31 16:43:30 crc kubenswrapper[4769]: I0131 16:43:30.520101 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-kqvxs"] Jan 31 16:43:30 crc kubenswrapper[4769]: I0131 16:43:30.520950 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-index-kqvxs" Jan 31 16:43:30 crc kubenswrapper[4769]: I0131 16:43:30.524142 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-index-dockercfg-jwzqf" Jan 31 16:43:30 crc kubenswrapper[4769]: I0131 16:43:30.535601 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-kqvxs"] Jan 31 16:43:30 crc kubenswrapper[4769]: I0131 16:43:30.622171 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mlbxz\" (UniqueName: \"kubernetes.io/projected/99de27ec-fa60-442e-997f-78059569c633-kube-api-access-mlbxz\") pod \"rabbitmq-cluster-operator-index-kqvxs\" (UID: \"99de27ec-fa60-442e-997f-78059569c633\") " pod="openstack-operators/rabbitmq-cluster-operator-index-kqvxs" Jan 31 16:43:30 crc kubenswrapper[4769]: I0131 16:43:30.723928 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mlbxz\" (UniqueName: \"kubernetes.io/projected/99de27ec-fa60-442e-997f-78059569c633-kube-api-access-mlbxz\") pod \"rabbitmq-cluster-operator-index-kqvxs\" (UID: \"99de27ec-fa60-442e-997f-78059569c633\") " pod="openstack-operators/rabbitmq-cluster-operator-index-kqvxs" Jan 31 16:43:30 crc kubenswrapper[4769]: I0131 16:43:30.747351 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mlbxz\" (UniqueName: \"kubernetes.io/projected/99de27ec-fa60-442e-997f-78059569c633-kube-api-access-mlbxz\") pod \"rabbitmq-cluster-operator-index-kqvxs\" (UID: \"99de27ec-fa60-442e-997f-78059569c633\") " pod="openstack-operators/rabbitmq-cluster-operator-index-kqvxs" Jan 31 16:43:30 crc kubenswrapper[4769]: I0131 16:43:30.854641 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-index-kqvxs" Jan 31 16:43:31 crc kubenswrapper[4769]: I0131 16:43:31.104139 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-kqvxs"] Jan 31 16:43:31 crc kubenswrapper[4769]: I0131 16:43:31.953834 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-index-kqvxs" event={"ID":"99de27ec-fa60-442e-997f-78059569c633","Type":"ContainerStarted","Data":"955c7a55e08bfd5145b4872005677eff89e0c9bc418c5e39d5e17730d11c3165"} Jan 31 16:43:32 crc kubenswrapper[4769]: I0131 16:43:32.409882 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="swift-kuttl-tests/openstack-galera-0" Jan 31 16:43:32 crc kubenswrapper[4769]: I0131 16:43:32.409947 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/openstack-galera-0" Jan 31 16:43:32 crc kubenswrapper[4769]: I0131 16:43:32.478779 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/openstack-galera-2" Jan 31 16:43:32 crc kubenswrapper[4769]: I0131 16:43:32.478880 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="swift-kuttl-tests/openstack-galera-2" Jan 31 16:43:32 crc kubenswrapper[4769]: I0131 16:43:32.755118 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="swift-kuttl-tests/openstack-galera-1" Jan 31 16:43:32 crc kubenswrapper[4769]: I0131 16:43:32.755190 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/openstack-galera-1" Jan 31 16:43:34 crc kubenswrapper[4769]: I0131 16:43:34.207156 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="swift-kuttl-tests/memcached-0" Jan 31 16:43:34 crc kubenswrapper[4769]: I0131 16:43:34.983479 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-index-kqvxs" event={"ID":"99de27ec-fa60-442e-997f-78059569c633","Type":"ContainerStarted","Data":"a312b22818b6c272bacee2e5ff23b13e4bb7deb81a10f8c1e4a1625c1001fa7c"} Jan 31 16:43:35 crc kubenswrapper[4769]: I0131 16:43:35.000489 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-index-kqvxs" podStartSLOduration=1.644464355 podStartE2EDuration="5.000467177s" podCreationTimestamp="2026-01-31 16:43:30 +0000 UTC" firstStartedPulling="2026-01-31 16:43:31.113193978 +0000 UTC m=+859.187362647" lastFinishedPulling="2026-01-31 16:43:34.4691968 +0000 UTC m=+862.543365469" observedRunningTime="2026-01-31 16:43:34.997654312 +0000 UTC m=+863.071823021" watchObservedRunningTime="2026-01-31 16:43:35.000467177 +0000 UTC m=+863.074635846" Jan 31 16:43:35 crc kubenswrapper[4769]: I0131 16:43:35.724630 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-h5jth"] Jan 31 16:43:35 crc kubenswrapper[4769]: I0131 16:43:35.727423 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-h5jth" Jan 31 16:43:35 crc kubenswrapper[4769]: I0131 16:43:35.738344 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-h5jth"] Jan 31 16:43:35 crc kubenswrapper[4769]: I0131 16:43:35.810618 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/879a5e19-b209-44ef-b322-46f785564385-utilities\") pod \"community-operators-h5jth\" (UID: \"879a5e19-b209-44ef-b322-46f785564385\") " pod="openshift-marketplace/community-operators-h5jth" Jan 31 16:43:35 crc kubenswrapper[4769]: I0131 16:43:35.810993 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zsc45\" (UniqueName: \"kubernetes.io/projected/879a5e19-b209-44ef-b322-46f785564385-kube-api-access-zsc45\") pod \"community-operators-h5jth\" (UID: \"879a5e19-b209-44ef-b322-46f785564385\") " pod="openshift-marketplace/community-operators-h5jth" Jan 31 16:43:35 crc kubenswrapper[4769]: I0131 16:43:35.811059 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/879a5e19-b209-44ef-b322-46f785564385-catalog-content\") pod \"community-operators-h5jth\" (UID: \"879a5e19-b209-44ef-b322-46f785564385\") " pod="openshift-marketplace/community-operators-h5jth" Jan 31 16:43:35 crc kubenswrapper[4769]: I0131 16:43:35.912109 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/879a5e19-b209-44ef-b322-46f785564385-utilities\") pod \"community-operators-h5jth\" (UID: \"879a5e19-b209-44ef-b322-46f785564385\") " pod="openshift-marketplace/community-operators-h5jth" Jan 31 16:43:35 crc kubenswrapper[4769]: I0131 16:43:35.912239 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zsc45\" (UniqueName: \"kubernetes.io/projected/879a5e19-b209-44ef-b322-46f785564385-kube-api-access-zsc45\") pod \"community-operators-h5jth\" (UID: \"879a5e19-b209-44ef-b322-46f785564385\") " pod="openshift-marketplace/community-operators-h5jth" Jan 31 16:43:35 crc kubenswrapper[4769]: I0131 16:43:35.912265 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/879a5e19-b209-44ef-b322-46f785564385-catalog-content\") pod \"community-operators-h5jth\" (UID: \"879a5e19-b209-44ef-b322-46f785564385\") " pod="openshift-marketplace/community-operators-h5jth" Jan 31 16:43:35 crc kubenswrapper[4769]: I0131 16:43:35.912850 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/879a5e19-b209-44ef-b322-46f785564385-catalog-content\") pod \"community-operators-h5jth\" (UID: \"879a5e19-b209-44ef-b322-46f785564385\") " pod="openshift-marketplace/community-operators-h5jth" Jan 31 16:43:35 crc kubenswrapper[4769]: I0131 16:43:35.912919 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/879a5e19-b209-44ef-b322-46f785564385-utilities\") pod \"community-operators-h5jth\" (UID: \"879a5e19-b209-44ef-b322-46f785564385\") " pod="openshift-marketplace/community-operators-h5jth" Jan 31 16:43:35 crc kubenswrapper[4769]: I0131 16:43:35.947245 4769 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-zsc45\" (UniqueName: \"kubernetes.io/projected/879a5e19-b209-44ef-b322-46f785564385-kube-api-access-zsc45\") pod \"community-operators-h5jth\" (UID: \"879a5e19-b209-44ef-b322-46f785564385\") " pod="openshift-marketplace/community-operators-h5jth" Jan 31 16:43:36 crc kubenswrapper[4769]: I0131 16:43:36.048139 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-h5jth" Jan 31 16:43:36 crc kubenswrapper[4769]: I0131 16:43:36.313615 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-kqvxs"] Jan 31 16:43:36 crc kubenswrapper[4769]: I0131 16:43:36.505998 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-h5jth"] Jan 31 16:43:36 crc kubenswrapper[4769]: W0131 16:43:36.511545 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod879a5e19_b209_44ef_b322_46f785564385.slice/crio-b034807e462bbcdaf957479734d3017fe5b9b02f3c29d8d53bba30601a17807f WatchSource:0}: Error finding container b034807e462bbcdaf957479734d3017fe5b9b02f3c29d8d53bba30601a17807f: Status 404 returned error can't find the container with id b034807e462bbcdaf957479734d3017fe5b9b02f3c29d8d53bba30601a17807f Jan 31 16:43:37 crc kubenswrapper[4769]: I0131 16:43:37.003354 4769 generic.go:334] "Generic (PLEG): container finished" podID="879a5e19-b209-44ef-b322-46f785564385" containerID="2fd63772dbe95cb1dfd13a5eb055af2b4fe5f532f93fc37909cbcc4f1ec3bda0" exitCode=0 Jan 31 16:43:37 crc kubenswrapper[4769]: I0131 16:43:37.003633 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/rabbitmq-cluster-operator-index-kqvxs" podUID="99de27ec-fa60-442e-997f-78059569c633" containerName="registry-server" containerID="cri-o://a312b22818b6c272bacee2e5ff23b13e4bb7deb81a10f8c1e4a1625c1001fa7c" gracePeriod=2 Jan 31 16:43:37 crc kubenswrapper[4769]: I0131 16:43:37.004685 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h5jth" event={"ID":"879a5e19-b209-44ef-b322-46f785564385","Type":"ContainerDied","Data":"2fd63772dbe95cb1dfd13a5eb055af2b4fe5f532f93fc37909cbcc4f1ec3bda0"} Jan 31 16:43:37 crc kubenswrapper[4769]: I0131 16:43:37.004771 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h5jth" event={"ID":"879a5e19-b209-44ef-b322-46f785564385","Type":"ContainerStarted","Data":"b034807e462bbcdaf957479734d3017fe5b9b02f3c29d8d53bba30601a17807f"} Jan 31 16:43:37 crc kubenswrapper[4769]: I0131 16:43:37.120839 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-bk6tw"] Jan 31 16:43:37 crc kubenswrapper[4769]: I0131 16:43:37.122045 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-index-bk6tw" Jan 31 16:43:37 crc kubenswrapper[4769]: I0131 16:43:37.139677 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-bk6tw"] Jan 31 16:43:37 crc kubenswrapper[4769]: I0131 16:43:37.228837 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w458x\" (UniqueName: \"kubernetes.io/projected/48020ab1-7b3f-4269-9ebd-8f275417b7cb-kube-api-access-w458x\") pod \"rabbitmq-cluster-operator-index-bk6tw\" (UID: \"48020ab1-7b3f-4269-9ebd-8f275417b7cb\") " pod="openstack-operators/rabbitmq-cluster-operator-index-bk6tw" Jan 31 16:43:37 crc kubenswrapper[4769]: I0131 16:43:37.333204 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w458x\" (UniqueName: \"kubernetes.io/projected/48020ab1-7b3f-4269-9ebd-8f275417b7cb-kube-api-access-w458x\") pod \"rabbitmq-cluster-operator-index-bk6tw\" (UID: \"48020ab1-7b3f-4269-9ebd-8f275417b7cb\") " pod="openstack-operators/rabbitmq-cluster-operator-index-bk6tw" Jan 31 16:43:37 crc kubenswrapper[4769]: I0131 16:43:37.375575 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w458x\" (UniqueName: \"kubernetes.io/projected/48020ab1-7b3f-4269-9ebd-8f275417b7cb-kube-api-access-w458x\") pod \"rabbitmq-cluster-operator-index-bk6tw\" (UID: \"48020ab1-7b3f-4269-9ebd-8f275417b7cb\") " pod="openstack-operators/rabbitmq-cluster-operator-index-bk6tw" Jan 31 16:43:37 crc kubenswrapper[4769]: I0131 16:43:37.467158 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-index-kqvxs" Jan 31 16:43:37 crc kubenswrapper[4769]: I0131 16:43:37.467670 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-index-bk6tw" Jan 31 16:43:37 crc kubenswrapper[4769]: I0131 16:43:37.535562 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mlbxz\" (UniqueName: \"kubernetes.io/projected/99de27ec-fa60-442e-997f-78059569c633-kube-api-access-mlbxz\") pod \"99de27ec-fa60-442e-997f-78059569c633\" (UID: \"99de27ec-fa60-442e-997f-78059569c633\") " Jan 31 16:43:37 crc kubenswrapper[4769]: I0131 16:43:37.542718 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/99de27ec-fa60-442e-997f-78059569c633-kube-api-access-mlbxz" (OuterVolumeSpecName: "kube-api-access-mlbxz") pod "99de27ec-fa60-442e-997f-78059569c633" (UID: "99de27ec-fa60-442e-997f-78059569c633"). InnerVolumeSpecName "kube-api-access-mlbxz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:43:37 crc kubenswrapper[4769]: I0131 16:43:37.637798 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mlbxz\" (UniqueName: \"kubernetes.io/projected/99de27ec-fa60-442e-997f-78059569c633-kube-api-access-mlbxz\") on node \"crc\" DevicePath \"\"" Jan 31 16:43:37 crc kubenswrapper[4769]: I0131 16:43:37.724042 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-bk6tw"] Jan 31 16:43:38 crc kubenswrapper[4769]: I0131 16:43:38.012533 4769 generic.go:334] "Generic (PLEG): container finished" podID="99de27ec-fa60-442e-997f-78059569c633" containerID="a312b22818b6c272bacee2e5ff23b13e4bb7deb81a10f8c1e4a1625c1001fa7c" exitCode=0 Jan 31 16:43:38 crc kubenswrapper[4769]: I0131 16:43:38.012585 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-index-kqvxs" Jan 31 16:43:38 crc kubenswrapper[4769]: I0131 16:43:38.012604 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-index-kqvxs" event={"ID":"99de27ec-fa60-442e-997f-78059569c633","Type":"ContainerDied","Data":"a312b22818b6c272bacee2e5ff23b13e4bb7deb81a10f8c1e4a1625c1001fa7c"} Jan 31 16:43:38 crc kubenswrapper[4769]: I0131 16:43:38.013148 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-index-kqvxs" event={"ID":"99de27ec-fa60-442e-997f-78059569c633","Type":"ContainerDied","Data":"955c7a55e08bfd5145b4872005677eff89e0c9bc418c5e39d5e17730d11c3165"} Jan 31 16:43:38 crc kubenswrapper[4769]: I0131 16:43:38.013170 4769 scope.go:117] "RemoveContainer" containerID="a312b22818b6c272bacee2e5ff23b13e4bb7deb81a10f8c1e4a1625c1001fa7c" Jan 31 16:43:38 crc kubenswrapper[4769]: I0131 16:43:38.022630 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h5jth" event={"ID":"879a5e19-b209-44ef-b322-46f785564385","Type":"ContainerStarted","Data":"6098037c275fdcb870b37ca57a2455cca8d7f3d9c0da0eef79cda030a133803a"} Jan 31 16:43:38 crc kubenswrapper[4769]: I0131 16:43:38.023998 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-index-bk6tw" event={"ID":"48020ab1-7b3f-4269-9ebd-8f275417b7cb","Type":"ContainerStarted","Data":"8aee244f2d0bc05097881c1ec9d3040be34dd9c6fcdf9482fe6f16630b1b0b55"} Jan 31 16:43:38 crc kubenswrapper[4769]: I0131 16:43:38.029706 4769 scope.go:117] "RemoveContainer" containerID="a312b22818b6c272bacee2e5ff23b13e4bb7deb81a10f8c1e4a1625c1001fa7c" Jan 31 16:43:38 crc kubenswrapper[4769]: E0131 16:43:38.030263 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a312b22818b6c272bacee2e5ff23b13e4bb7deb81a10f8c1e4a1625c1001fa7c\": container with ID starting with a312b22818b6c272bacee2e5ff23b13e4bb7deb81a10f8c1e4a1625c1001fa7c not found: ID does not exist" containerID="a312b22818b6c272bacee2e5ff23b13e4bb7deb81a10f8c1e4a1625c1001fa7c" Jan 31 16:43:38 crc kubenswrapper[4769]: I0131 16:43:38.030298 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a312b22818b6c272bacee2e5ff23b13e4bb7deb81a10f8c1e4a1625c1001fa7c"} err="failed to get container status \"a312b22818b6c272bacee2e5ff23b13e4bb7deb81a10f8c1e4a1625c1001fa7c\": rpc error: code = NotFound desc = could not find container 
\"a312b22818b6c272bacee2e5ff23b13e4bb7deb81a10f8c1e4a1625c1001fa7c\": container with ID starting with a312b22818b6c272bacee2e5ff23b13e4bb7deb81a10f8c1e4a1625c1001fa7c not found: ID does not exist" Jan 31 16:43:38 crc kubenswrapper[4769]: I0131 16:43:38.066011 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-kqvxs"] Jan 31 16:43:38 crc kubenswrapper[4769]: I0131 16:43:38.069877 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-kqvxs"] Jan 31 16:43:38 crc kubenswrapper[4769]: I0131 16:43:38.598394 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="swift-kuttl-tests/openstack-galera-2" Jan 31 16:43:38 crc kubenswrapper[4769]: I0131 16:43:38.694239 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="swift-kuttl-tests/openstack-galera-2" Jan 31 16:43:38 crc kubenswrapper[4769]: I0131 16:43:38.719198 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="99de27ec-fa60-442e-997f-78059569c633" path="/var/lib/kubelet/pods/99de27ec-fa60-442e-997f-78059569c633/volumes" Jan 31 16:43:39 crc kubenswrapper[4769]: I0131 16:43:39.032966 4769 generic.go:334] "Generic (PLEG): container finished" podID="879a5e19-b209-44ef-b322-46f785564385" containerID="6098037c275fdcb870b37ca57a2455cca8d7f3d9c0da0eef79cda030a133803a" exitCode=0 Jan 31 16:43:39 crc kubenswrapper[4769]: I0131 16:43:39.033034 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h5jth" event={"ID":"879a5e19-b209-44ef-b322-46f785564385","Type":"ContainerDied","Data":"6098037c275fdcb870b37ca57a2455cca8d7f3d9c0da0eef79cda030a133803a"} Jan 31 16:43:39 crc kubenswrapper[4769]: I0131 16:43:39.037925 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-index-bk6tw" event={"ID":"48020ab1-7b3f-4269-9ebd-8f275417b7cb","Type":"ContainerStarted","Data":"a8b62a555cb98e015fb7cdb66b066bb22bb50d15e8abec4623634bdcd4796881"} Jan 31 16:43:39 crc kubenswrapper[4769]: I0131 16:43:39.081480 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-index-bk6tw" podStartSLOduration=1.636376082 podStartE2EDuration="2.08143933s" podCreationTimestamp="2026-01-31 16:43:37 +0000 UTC" firstStartedPulling="2026-01-31 16:43:37.736465426 +0000 UTC m=+865.810634095" lastFinishedPulling="2026-01-31 16:43:38.181528674 +0000 UTC m=+866.255697343" observedRunningTime="2026-01-31 16:43:39.074560567 +0000 UTC m=+867.148729266" watchObservedRunningTime="2026-01-31 16:43:39.08143933 +0000 UTC m=+867.155608039" Jan 31 16:43:40 crc kubenswrapper[4769]: I0131 16:43:40.045996 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h5jth" event={"ID":"879a5e19-b209-44ef-b322-46f785564385","Type":"ContainerStarted","Data":"b4321ffe60e2aa9044396b1335bfe80932c139af7ff32aed6312fed3aadb69a4"} Jan 31 16:43:40 crc kubenswrapper[4769]: I0131 16:43:40.068358 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-h5jth" podStartSLOduration=2.640815135 podStartE2EDuration="5.068331857s" podCreationTimestamp="2026-01-31 16:43:35 +0000 UTC" firstStartedPulling="2026-01-31 16:43:37.005764731 +0000 UTC m=+865.079933440" lastFinishedPulling="2026-01-31 16:43:39.433281483 +0000 UTC m=+867.507450162" observedRunningTime="2026-01-31 
16:43:40.062448751 +0000 UTC m=+868.136617420" watchObservedRunningTime="2026-01-31 16:43:40.068331857 +0000 UTC m=+868.142500556" Jan 31 16:43:41 crc kubenswrapper[4769]: I0131 16:43:41.106578 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/root-account-create-update-jvd9g"] Jan 31 16:43:41 crc kubenswrapper[4769]: E0131 16:43:41.107006 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="99de27ec-fa60-442e-997f-78059569c633" containerName="registry-server" Jan 31 16:43:41 crc kubenswrapper[4769]: I0131 16:43:41.107019 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="99de27ec-fa60-442e-997f-78059569c633" containerName="registry-server" Jan 31 16:43:41 crc kubenswrapper[4769]: I0131 16:43:41.107128 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="99de27ec-fa60-442e-997f-78059569c633" containerName="registry-server" Jan 31 16:43:41 crc kubenswrapper[4769]: I0131 16:43:41.107528 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/root-account-create-update-jvd9g" Jan 31 16:43:41 crc kubenswrapper[4769]: I0131 16:43:41.112748 4769 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"openstack-mariadb-root-db-secret" Jan 31 16:43:41 crc kubenswrapper[4769]: I0131 16:43:41.121470 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/root-account-create-update-jvd9g"] Jan 31 16:43:41 crc kubenswrapper[4769]: I0131 16:43:41.188048 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-br9bz\" (UniqueName: \"kubernetes.io/projected/2425631f-500c-4ce4-9a21-7913d950b573-kube-api-access-br9bz\") pod \"root-account-create-update-jvd9g\" (UID: \"2425631f-500c-4ce4-9a21-7913d950b573\") " pod="swift-kuttl-tests/root-account-create-update-jvd9g" Jan 31 16:43:41 crc kubenswrapper[4769]: I0131 16:43:41.188114 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2425631f-500c-4ce4-9a21-7913d950b573-operator-scripts\") pod \"root-account-create-update-jvd9g\" (UID: \"2425631f-500c-4ce4-9a21-7913d950b573\") " pod="swift-kuttl-tests/root-account-create-update-jvd9g" Jan 31 16:43:41 crc kubenswrapper[4769]: I0131 16:43:41.289886 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-br9bz\" (UniqueName: \"kubernetes.io/projected/2425631f-500c-4ce4-9a21-7913d950b573-kube-api-access-br9bz\") pod \"root-account-create-update-jvd9g\" (UID: \"2425631f-500c-4ce4-9a21-7913d950b573\") " pod="swift-kuttl-tests/root-account-create-update-jvd9g" Jan 31 16:43:41 crc kubenswrapper[4769]: I0131 16:43:41.289965 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2425631f-500c-4ce4-9a21-7913d950b573-operator-scripts\") pod \"root-account-create-update-jvd9g\" (UID: \"2425631f-500c-4ce4-9a21-7913d950b573\") " pod="swift-kuttl-tests/root-account-create-update-jvd9g" Jan 31 16:43:41 crc kubenswrapper[4769]: I0131 16:43:41.291022 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2425631f-500c-4ce4-9a21-7913d950b573-operator-scripts\") pod \"root-account-create-update-jvd9g\" (UID: \"2425631f-500c-4ce4-9a21-7913d950b573\") " 
pod="swift-kuttl-tests/root-account-create-update-jvd9g" Jan 31 16:43:41 crc kubenswrapper[4769]: I0131 16:43:41.319097 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-br9bz\" (UniqueName: \"kubernetes.io/projected/2425631f-500c-4ce4-9a21-7913d950b573-kube-api-access-br9bz\") pod \"root-account-create-update-jvd9g\" (UID: \"2425631f-500c-4ce4-9a21-7913d950b573\") " pod="swift-kuttl-tests/root-account-create-update-jvd9g" Jan 31 16:43:41 crc kubenswrapper[4769]: I0131 16:43:41.428588 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/root-account-create-update-jvd9g" Jan 31 16:43:41 crc kubenswrapper[4769]: I0131 16:43:41.894762 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/root-account-create-update-jvd9g"] Jan 31 16:43:41 crc kubenswrapper[4769]: W0131 16:43:41.899362 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2425631f_500c_4ce4_9a21_7913d950b573.slice/crio-a3792dbc24c0fc38d008a395f89614ceac4c67cff6617d72603a7de32e1163c3 WatchSource:0}: Error finding container a3792dbc24c0fc38d008a395f89614ceac4c67cff6617d72603a7de32e1163c3: Status 404 returned error can't find the container with id a3792dbc24c0fc38d008a395f89614ceac4c67cff6617d72603a7de32e1163c3 Jan 31 16:43:42 crc kubenswrapper[4769]: I0131 16:43:42.056621 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/root-account-create-update-jvd9g" event={"ID":"2425631f-500c-4ce4-9a21-7913d950b573","Type":"ContainerStarted","Data":"a3792dbc24c0fc38d008a395f89614ceac4c67cff6617d72603a7de32e1163c3"} Jan 31 16:43:42 crc kubenswrapper[4769]: I0131 16:43:42.619389 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/openstack-galera-2" podUID="cc26650a-e98c-4c58-bfea-16ebfc50a1a1" containerName="galera" probeResult="failure" output=< Jan 31 16:43:42 crc kubenswrapper[4769]: wsrep_local_state_comment (Donor/Desynced) differs from Synced Jan 31 16:43:42 crc kubenswrapper[4769]: > Jan 31 16:43:46 crc kubenswrapper[4769]: I0131 16:43:46.049048 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-h5jth" Jan 31 16:43:46 crc kubenswrapper[4769]: I0131 16:43:46.049483 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-h5jth" Jan 31 16:43:46 crc kubenswrapper[4769]: I0131 16:43:46.088128 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/root-account-create-update-jvd9g" event={"ID":"2425631f-500c-4ce4-9a21-7913d950b573","Type":"ContainerStarted","Data":"863c7b93297b0da7036a3d10a284c9ae43b0dbb3879920d5fa8d47e63a6b623b"} Jan 31 16:43:46 crc kubenswrapper[4769]: I0131 16:43:46.107713 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-h5jth" Jan 31 16:43:46 crc kubenswrapper[4769]: I0131 16:43:46.121785 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/root-account-create-update-jvd9g" podStartSLOduration=5.121750737 podStartE2EDuration="5.121750737s" podCreationTimestamp="2026-01-31 16:43:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:43:46.104735952 +0000 UTC m=+874.178904651" watchObservedRunningTime="2026-01-31 
16:43:46.121750737 +0000 UTC m=+874.195919446" Jan 31 16:43:46 crc kubenswrapper[4769]: I0131 16:43:46.163927 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-h5jth" Jan 31 16:43:47 crc kubenswrapper[4769]: I0131 16:43:47.319845 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-nwfjq"] Jan 31 16:43:47 crc kubenswrapper[4769]: I0131 16:43:47.322033 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nwfjq" Jan 31 16:43:47 crc kubenswrapper[4769]: I0131 16:43:47.329796 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-nwfjq"] Jan 31 16:43:47 crc kubenswrapper[4769]: I0131 16:43:47.377327 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tfdfp\" (UniqueName: \"kubernetes.io/projected/f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9-kube-api-access-tfdfp\") pod \"redhat-marketplace-nwfjq\" (UID: \"f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9\") " pod="openshift-marketplace/redhat-marketplace-nwfjq" Jan 31 16:43:47 crc kubenswrapper[4769]: I0131 16:43:47.377385 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9-catalog-content\") pod \"redhat-marketplace-nwfjq\" (UID: \"f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9\") " pod="openshift-marketplace/redhat-marketplace-nwfjq" Jan 31 16:43:47 crc kubenswrapper[4769]: I0131 16:43:47.377599 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9-utilities\") pod \"redhat-marketplace-nwfjq\" (UID: \"f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9\") " pod="openshift-marketplace/redhat-marketplace-nwfjq" Jan 31 16:43:47 crc kubenswrapper[4769]: I0131 16:43:47.467932 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/rabbitmq-cluster-operator-index-bk6tw" Jan 31 16:43:47 crc kubenswrapper[4769]: I0131 16:43:47.467996 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/rabbitmq-cluster-operator-index-bk6tw" Jan 31 16:43:47 crc kubenswrapper[4769]: I0131 16:43:47.478715 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tfdfp\" (UniqueName: \"kubernetes.io/projected/f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9-kube-api-access-tfdfp\") pod \"redhat-marketplace-nwfjq\" (UID: \"f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9\") " pod="openshift-marketplace/redhat-marketplace-nwfjq" Jan 31 16:43:47 crc kubenswrapper[4769]: I0131 16:43:47.478788 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9-catalog-content\") pod \"redhat-marketplace-nwfjq\" (UID: \"f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9\") " pod="openshift-marketplace/redhat-marketplace-nwfjq" Jan 31 16:43:47 crc kubenswrapper[4769]: I0131 16:43:47.478877 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9-utilities\") pod \"redhat-marketplace-nwfjq\" (UID: \"f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9\") " 
pod="openshift-marketplace/redhat-marketplace-nwfjq" Jan 31 16:43:47 crc kubenswrapper[4769]: I0131 16:43:47.479245 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9-catalog-content\") pod \"redhat-marketplace-nwfjq\" (UID: \"f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9\") " pod="openshift-marketplace/redhat-marketplace-nwfjq" Jan 31 16:43:47 crc kubenswrapper[4769]: I0131 16:43:47.479345 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9-utilities\") pod \"redhat-marketplace-nwfjq\" (UID: \"f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9\") " pod="openshift-marketplace/redhat-marketplace-nwfjq" Jan 31 16:43:47 crc kubenswrapper[4769]: I0131 16:43:47.498907 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/rabbitmq-cluster-operator-index-bk6tw" Jan 31 16:43:47 crc kubenswrapper[4769]: I0131 16:43:47.499182 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tfdfp\" (UniqueName: \"kubernetes.io/projected/f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9-kube-api-access-tfdfp\") pod \"redhat-marketplace-nwfjq\" (UID: \"f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9\") " pod="openshift-marketplace/redhat-marketplace-nwfjq" Jan 31 16:43:47 crc kubenswrapper[4769]: I0131 16:43:47.642213 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nwfjq" Jan 31 16:43:48 crc kubenswrapper[4769]: I0131 16:43:48.102525 4769 generic.go:334] "Generic (PLEG): container finished" podID="2425631f-500c-4ce4-9a21-7913d950b573" containerID="863c7b93297b0da7036a3d10a284c9ae43b0dbb3879920d5fa8d47e63a6b623b" exitCode=0 Jan 31 16:43:48 crc kubenswrapper[4769]: I0131 16:43:48.103581 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/root-account-create-update-jvd9g" event={"ID":"2425631f-500c-4ce4-9a21-7913d950b573","Type":"ContainerDied","Data":"863c7b93297b0da7036a3d10a284c9ae43b0dbb3879920d5fa8d47e63a6b623b"} Jan 31 16:43:48 crc kubenswrapper[4769]: I0131 16:43:48.119234 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-nwfjq"] Jan 31 16:43:48 crc kubenswrapper[4769]: I0131 16:43:48.134650 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/rabbitmq-cluster-operator-index-bk6tw" Jan 31 16:43:49 crc kubenswrapper[4769]: I0131 16:43:49.111473 4769 generic.go:334] "Generic (PLEG): container finished" podID="f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9" containerID="7b97dfdb286acade5816f6344d9b0f27b6ec847c2aae05c95d4df635f23f3a4b" exitCode=0 Jan 31 16:43:49 crc kubenswrapper[4769]: I0131 16:43:49.111573 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nwfjq" event={"ID":"f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9","Type":"ContainerDied","Data":"7b97dfdb286acade5816f6344d9b0f27b6ec847c2aae05c95d4df635f23f3a4b"} Jan 31 16:43:49 crc kubenswrapper[4769]: I0131 16:43:49.111635 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nwfjq" event={"ID":"f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9","Type":"ContainerStarted","Data":"b85ac876138ed9adc565c97e15fa8a9662dd7b1c1cf9a93b95f0fa33a0268d13"} Jan 31 16:43:49 crc kubenswrapper[4769]: I0131 16:43:49.485824 4769 util.go:48] "No ready 
sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/root-account-create-update-jvd9g" Jan 31 16:43:49 crc kubenswrapper[4769]: I0131 16:43:49.508140 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-br9bz\" (UniqueName: \"kubernetes.io/projected/2425631f-500c-4ce4-9a21-7913d950b573-kube-api-access-br9bz\") pod \"2425631f-500c-4ce4-9a21-7913d950b573\" (UID: \"2425631f-500c-4ce4-9a21-7913d950b573\") " Jan 31 16:43:49 crc kubenswrapper[4769]: I0131 16:43:49.508283 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2425631f-500c-4ce4-9a21-7913d950b573-operator-scripts\") pod \"2425631f-500c-4ce4-9a21-7913d950b573\" (UID: \"2425631f-500c-4ce4-9a21-7913d950b573\") " Jan 31 16:43:49 crc kubenswrapper[4769]: I0131 16:43:49.509028 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2425631f-500c-4ce4-9a21-7913d950b573-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2425631f-500c-4ce4-9a21-7913d950b573" (UID: "2425631f-500c-4ce4-9a21-7913d950b573"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:43:49 crc kubenswrapper[4769]: I0131 16:43:49.515588 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2425631f-500c-4ce4-9a21-7913d950b573-kube-api-access-br9bz" (OuterVolumeSpecName: "kube-api-access-br9bz") pod "2425631f-500c-4ce4-9a21-7913d950b573" (UID: "2425631f-500c-4ce4-9a21-7913d950b573"). InnerVolumeSpecName "kube-api-access-br9bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:43:49 crc kubenswrapper[4769]: I0131 16:43:49.609799 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-br9bz\" (UniqueName: \"kubernetes.io/projected/2425631f-500c-4ce4-9a21-7913d950b573-kube-api-access-br9bz\") on node \"crc\" DevicePath \"\"" Jan 31 16:43:49 crc kubenswrapper[4769]: I0131 16:43:49.609834 4769 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2425631f-500c-4ce4-9a21-7913d950b573-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 31 16:43:49 crc kubenswrapper[4769]: I0131 16:43:49.782241 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5905gjn9"] Jan 31 16:43:49 crc kubenswrapper[4769]: E0131 16:43:49.782873 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2425631f-500c-4ce4-9a21-7913d950b573" containerName="mariadb-account-create-update" Jan 31 16:43:49 crc kubenswrapper[4769]: I0131 16:43:49.782895 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="2425631f-500c-4ce4-9a21-7913d950b573" containerName="mariadb-account-create-update" Jan 31 16:43:49 crc kubenswrapper[4769]: I0131 16:43:49.783056 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="2425631f-500c-4ce4-9a21-7913d950b573" containerName="mariadb-account-create-update" Jan 31 16:43:49 crc kubenswrapper[4769]: I0131 16:43:49.784576 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5905gjn9" Jan 31 16:43:49 crc kubenswrapper[4769]: I0131 16:43:49.788724 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-x6tzc" Jan 31 16:43:49 crc kubenswrapper[4769]: I0131 16:43:49.791191 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5905gjn9"] Jan 31 16:43:49 crc kubenswrapper[4769]: I0131 16:43:49.812291 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7-util\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5905gjn9\" (UID: \"ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5905gjn9" Jan 31 16:43:49 crc kubenswrapper[4769]: I0131 16:43:49.812405 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7-bundle\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5905gjn9\" (UID: \"ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5905gjn9" Jan 31 16:43:49 crc kubenswrapper[4769]: I0131 16:43:49.812449 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t9g8c\" (UniqueName: \"kubernetes.io/projected/ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7-kube-api-access-t9g8c\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5905gjn9\" (UID: \"ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5905gjn9" Jan 31 16:43:49 crc kubenswrapper[4769]: I0131 16:43:49.913338 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7-bundle\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5905gjn9\" (UID: \"ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5905gjn9" Jan 31 16:43:49 crc kubenswrapper[4769]: I0131 16:43:49.913409 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t9g8c\" (UniqueName: \"kubernetes.io/projected/ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7-kube-api-access-t9g8c\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5905gjn9\" (UID: \"ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5905gjn9" Jan 31 16:43:49 crc kubenswrapper[4769]: I0131 16:43:49.913460 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7-util\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5905gjn9\" (UID: \"ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5905gjn9" Jan 31 16:43:49 crc kubenswrapper[4769]: I0131 16:43:49.914030 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7-util\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5905gjn9\" (UID: \"ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5905gjn9" Jan 31 16:43:49 crc kubenswrapper[4769]: I0131 16:43:49.914204 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7-bundle\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5905gjn9\" (UID: \"ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5905gjn9" Jan 31 16:43:49 crc kubenswrapper[4769]: I0131 16:43:49.930134 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t9g8c\" (UniqueName: \"kubernetes.io/projected/ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7-kube-api-access-t9g8c\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5905gjn9\" (UID: \"ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5905gjn9" Jan 31 16:43:50 crc kubenswrapper[4769]: I0131 16:43:50.097167 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5905gjn9" Jan 31 16:43:50 crc kubenswrapper[4769]: I0131 16:43:50.118530 4769 generic.go:334] "Generic (PLEG): container finished" podID="f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9" containerID="5608152b6b5ab58c3d871677c070ffbad5c10ae2cd89a608eee1c6ed4e50b03f" exitCode=0 Jan 31 16:43:50 crc kubenswrapper[4769]: I0131 16:43:50.118605 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nwfjq" event={"ID":"f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9","Type":"ContainerDied","Data":"5608152b6b5ab58c3d871677c070ffbad5c10ae2cd89a608eee1c6ed4e50b03f"} Jan 31 16:43:50 crc kubenswrapper[4769]: I0131 16:43:50.121023 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/root-account-create-update-jvd9g" event={"ID":"2425631f-500c-4ce4-9a21-7913d950b573","Type":"ContainerDied","Data":"a3792dbc24c0fc38d008a395f89614ceac4c67cff6617d72603a7de32e1163c3"} Jan 31 16:43:50 crc kubenswrapper[4769]: I0131 16:43:50.121062 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a3792dbc24c0fc38d008a395f89614ceac4c67cff6617d72603a7de32e1163c3" Jan 31 16:43:50 crc kubenswrapper[4769]: I0131 16:43:50.121123 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/root-account-create-update-jvd9g" Jan 31 16:43:50 crc kubenswrapper[4769]: I0131 16:43:50.339591 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5905gjn9"] Jan 31 16:43:50 crc kubenswrapper[4769]: I0131 16:43:50.512436 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-h5jth"] Jan 31 16:43:50 crc kubenswrapper[4769]: I0131 16:43:50.512732 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-h5jth" podUID="879a5e19-b209-44ef-b322-46f785564385" containerName="registry-server" containerID="cri-o://b4321ffe60e2aa9044396b1335bfe80932c139af7ff32aed6312fed3aadb69a4" gracePeriod=2 Jan 31 16:43:50 crc kubenswrapper[4769]: I0131 16:43:50.936833 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-h5jth" Jan 31 16:43:51 crc kubenswrapper[4769]: I0131 16:43:51.133519 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zsc45\" (UniqueName: \"kubernetes.io/projected/879a5e19-b209-44ef-b322-46f785564385-kube-api-access-zsc45\") pod \"879a5e19-b209-44ef-b322-46f785564385\" (UID: \"879a5e19-b209-44ef-b322-46f785564385\") " Jan 31 16:43:51 crc kubenswrapper[4769]: I0131 16:43:51.133656 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/879a5e19-b209-44ef-b322-46f785564385-catalog-content\") pod \"879a5e19-b209-44ef-b322-46f785564385\" (UID: \"879a5e19-b209-44ef-b322-46f785564385\") " Jan 31 16:43:51 crc kubenswrapper[4769]: I0131 16:43:51.133697 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/879a5e19-b209-44ef-b322-46f785564385-utilities\") pod \"879a5e19-b209-44ef-b322-46f785564385\" (UID: \"879a5e19-b209-44ef-b322-46f785564385\") " Jan 31 16:43:51 crc kubenswrapper[4769]: I0131 16:43:51.137624 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/879a5e19-b209-44ef-b322-46f785564385-utilities" (OuterVolumeSpecName: "utilities") pod "879a5e19-b209-44ef-b322-46f785564385" (UID: "879a5e19-b209-44ef-b322-46f785564385"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:43:51 crc kubenswrapper[4769]: I0131 16:43:51.142020 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/879a5e19-b209-44ef-b322-46f785564385-kube-api-access-zsc45" (OuterVolumeSpecName: "kube-api-access-zsc45") pod "879a5e19-b209-44ef-b322-46f785564385" (UID: "879a5e19-b209-44ef-b322-46f785564385"). InnerVolumeSpecName "kube-api-access-zsc45". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:43:51 crc kubenswrapper[4769]: I0131 16:43:51.143086 4769 generic.go:334] "Generic (PLEG): container finished" podID="ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7" containerID="29f96e9151357e17dd33576a16e01a1c61fffc7b9ef7fe65124618adba0c53e4" exitCode=0 Jan 31 16:43:51 crc kubenswrapper[4769]: I0131 16:43:51.143159 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5905gjn9" event={"ID":"ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7","Type":"ContainerDied","Data":"29f96e9151357e17dd33576a16e01a1c61fffc7b9ef7fe65124618adba0c53e4"} Jan 31 16:43:51 crc kubenswrapper[4769]: I0131 16:43:51.143201 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5905gjn9" event={"ID":"ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7","Type":"ContainerStarted","Data":"d0ae46b291a2289f20ad31d3c4b467c9b711fa402eedd4af90dbef8bda30478c"} Jan 31 16:43:51 crc kubenswrapper[4769]: I0131 16:43:51.147779 4769 generic.go:334] "Generic (PLEG): container finished" podID="879a5e19-b209-44ef-b322-46f785564385" containerID="b4321ffe60e2aa9044396b1335bfe80932c139af7ff32aed6312fed3aadb69a4" exitCode=0 Jan 31 16:43:51 crc kubenswrapper[4769]: I0131 16:43:51.147886 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h5jth" event={"ID":"879a5e19-b209-44ef-b322-46f785564385","Type":"ContainerDied","Data":"b4321ffe60e2aa9044396b1335bfe80932c139af7ff32aed6312fed3aadb69a4"} Jan 31 16:43:51 crc kubenswrapper[4769]: I0131 16:43:51.147927 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h5jth" event={"ID":"879a5e19-b209-44ef-b322-46f785564385","Type":"ContainerDied","Data":"b034807e462bbcdaf957479734d3017fe5b9b02f3c29d8d53bba30601a17807f"} Jan 31 16:43:51 crc kubenswrapper[4769]: I0131 16:43:51.147951 4769 scope.go:117] "RemoveContainer" containerID="b4321ffe60e2aa9044396b1335bfe80932c139af7ff32aed6312fed3aadb69a4" Jan 31 16:43:51 crc kubenswrapper[4769]: I0131 16:43:51.148192 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-h5jth" Jan 31 16:43:51 crc kubenswrapper[4769]: I0131 16:43:51.150930 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nwfjq" event={"ID":"f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9","Type":"ContainerStarted","Data":"e5410bca71522dccf059042ef7dea37c3769434328cf82ab8e27d25ada2d5308"} Jan 31 16:43:51 crc kubenswrapper[4769]: I0131 16:43:51.169456 4769 scope.go:117] "RemoveContainer" containerID="6098037c275fdcb870b37ca57a2455cca8d7f3d9c0da0eef79cda030a133803a" Jan 31 16:43:51 crc kubenswrapper[4769]: I0131 16:43:51.191264 4769 scope.go:117] "RemoveContainer" containerID="2fd63772dbe95cb1dfd13a5eb055af2b4fe5f532f93fc37909cbcc4f1ec3bda0" Jan 31 16:43:51 crc kubenswrapper[4769]: I0131 16:43:51.195785 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-nwfjq" podStartSLOduration=2.744665194 podStartE2EDuration="4.19577008s" podCreationTimestamp="2026-01-31 16:43:47 +0000 UTC" firstStartedPulling="2026-01-31 16:43:49.113671179 +0000 UTC m=+877.187839888" lastFinishedPulling="2026-01-31 16:43:50.564776095 +0000 UTC m=+878.638944774" observedRunningTime="2026-01-31 16:43:51.192121014 +0000 UTC m=+879.266289683" watchObservedRunningTime="2026-01-31 16:43:51.19577008 +0000 UTC m=+879.269938739" Jan 31 16:43:51 crc kubenswrapper[4769]: I0131 16:43:51.210320 4769 scope.go:117] "RemoveContainer" containerID="b4321ffe60e2aa9044396b1335bfe80932c139af7ff32aed6312fed3aadb69a4" Jan 31 16:43:51 crc kubenswrapper[4769]: E0131 16:43:51.210876 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b4321ffe60e2aa9044396b1335bfe80932c139af7ff32aed6312fed3aadb69a4\": container with ID starting with b4321ffe60e2aa9044396b1335bfe80932c139af7ff32aed6312fed3aadb69a4 not found: ID does not exist" containerID="b4321ffe60e2aa9044396b1335bfe80932c139af7ff32aed6312fed3aadb69a4" Jan 31 16:43:51 crc kubenswrapper[4769]: I0131 16:43:51.210919 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b4321ffe60e2aa9044396b1335bfe80932c139af7ff32aed6312fed3aadb69a4"} err="failed to get container status \"b4321ffe60e2aa9044396b1335bfe80932c139af7ff32aed6312fed3aadb69a4\": rpc error: code = NotFound desc = could not find container \"b4321ffe60e2aa9044396b1335bfe80932c139af7ff32aed6312fed3aadb69a4\": container with ID starting with b4321ffe60e2aa9044396b1335bfe80932c139af7ff32aed6312fed3aadb69a4 not found: ID does not exist" Jan 31 16:43:51 crc kubenswrapper[4769]: I0131 16:43:51.210946 4769 scope.go:117] "RemoveContainer" containerID="6098037c275fdcb870b37ca57a2455cca8d7f3d9c0da0eef79cda030a133803a" Jan 31 16:43:51 crc kubenswrapper[4769]: E0131 16:43:51.212560 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6098037c275fdcb870b37ca57a2455cca8d7f3d9c0da0eef79cda030a133803a\": container with ID starting with 6098037c275fdcb870b37ca57a2455cca8d7f3d9c0da0eef79cda030a133803a not found: ID does not exist" containerID="6098037c275fdcb870b37ca57a2455cca8d7f3d9c0da0eef79cda030a133803a" Jan 31 16:43:51 crc kubenswrapper[4769]: I0131 16:43:51.212604 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6098037c275fdcb870b37ca57a2455cca8d7f3d9c0da0eef79cda030a133803a"} err="failed to get container status 
\"6098037c275fdcb870b37ca57a2455cca8d7f3d9c0da0eef79cda030a133803a\": rpc error: code = NotFound desc = could not find container \"6098037c275fdcb870b37ca57a2455cca8d7f3d9c0da0eef79cda030a133803a\": container with ID starting with 6098037c275fdcb870b37ca57a2455cca8d7f3d9c0da0eef79cda030a133803a not found: ID does not exist" Jan 31 16:43:51 crc kubenswrapper[4769]: I0131 16:43:51.212630 4769 scope.go:117] "RemoveContainer" containerID="2fd63772dbe95cb1dfd13a5eb055af2b4fe5f532f93fc37909cbcc4f1ec3bda0" Jan 31 16:43:51 crc kubenswrapper[4769]: E0131 16:43:51.212880 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2fd63772dbe95cb1dfd13a5eb055af2b4fe5f532f93fc37909cbcc4f1ec3bda0\": container with ID starting with 2fd63772dbe95cb1dfd13a5eb055af2b4fe5f532f93fc37909cbcc4f1ec3bda0 not found: ID does not exist" containerID="2fd63772dbe95cb1dfd13a5eb055af2b4fe5f532f93fc37909cbcc4f1ec3bda0" Jan 31 16:43:51 crc kubenswrapper[4769]: I0131 16:43:51.212901 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2fd63772dbe95cb1dfd13a5eb055af2b4fe5f532f93fc37909cbcc4f1ec3bda0"} err="failed to get container status \"2fd63772dbe95cb1dfd13a5eb055af2b4fe5f532f93fc37909cbcc4f1ec3bda0\": rpc error: code = NotFound desc = could not find container \"2fd63772dbe95cb1dfd13a5eb055af2b4fe5f532f93fc37909cbcc4f1ec3bda0\": container with ID starting with 2fd63772dbe95cb1dfd13a5eb055af2b4fe5f532f93fc37909cbcc4f1ec3bda0 not found: ID does not exist" Jan 31 16:43:51 crc kubenswrapper[4769]: I0131 16:43:51.212931 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/879a5e19-b209-44ef-b322-46f785564385-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "879a5e19-b209-44ef-b322-46f785564385" (UID: "879a5e19-b209-44ef-b322-46f785564385"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:43:51 crc kubenswrapper[4769]: I0131 16:43:51.237356 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/879a5e19-b209-44ef-b322-46f785564385-utilities\") on node \"crc\" DevicePath \"\"" Jan 31 16:43:51 crc kubenswrapper[4769]: I0131 16:43:51.237384 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zsc45\" (UniqueName: \"kubernetes.io/projected/879a5e19-b209-44ef-b322-46f785564385-kube-api-access-zsc45\") on node \"crc\" DevicePath \"\"" Jan 31 16:43:51 crc kubenswrapper[4769]: I0131 16:43:51.237395 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/879a5e19-b209-44ef-b322-46f785564385-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 31 16:43:51 crc kubenswrapper[4769]: I0131 16:43:51.251313 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="swift-kuttl-tests/openstack-galera-1" Jan 31 16:43:51 crc kubenswrapper[4769]: I0131 16:43:51.326315 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="swift-kuttl-tests/openstack-galera-1" Jan 31 16:43:51 crc kubenswrapper[4769]: I0131 16:43:51.476415 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-h5jth"] Jan 31 16:43:51 crc kubenswrapper[4769]: I0131 16:43:51.480537 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-h5jth"] Jan 31 16:43:52 crc kubenswrapper[4769]: I0131 16:43:52.158775 4769 generic.go:334] "Generic (PLEG): container finished" podID="ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7" containerID="2628973662f10b79f3d069c47482bc1a2b0902bfdcfc1a12e66d0847940bf164" exitCode=0 Jan 31 16:43:52 crc kubenswrapper[4769]: I0131 16:43:52.158827 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5905gjn9" event={"ID":"ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7","Type":"ContainerDied","Data":"2628973662f10b79f3d069c47482bc1a2b0902bfdcfc1a12e66d0847940bf164"} Jan 31 16:43:52 crc kubenswrapper[4769]: I0131 16:43:52.719449 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="879a5e19-b209-44ef-b322-46f785564385" path="/var/lib/kubelet/pods/879a5e19-b209-44ef-b322-46f785564385/volumes" Jan 31 16:43:52 crc kubenswrapper[4769]: I0131 16:43:52.969533 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="swift-kuttl-tests/openstack-galera-0" Jan 31 16:43:53 crc kubenswrapper[4769]: I0131 16:43:53.048770 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="swift-kuttl-tests/openstack-galera-0" Jan 31 16:43:53 crc kubenswrapper[4769]: I0131 16:43:53.167565 4769 generic.go:334] "Generic (PLEG): container finished" podID="ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7" containerID="395e03cc6ec17e1382ff8b0ad75d58bb571d1c8eeff9627fd546a9982d9b9cee" exitCode=0 Jan 31 16:43:53 crc kubenswrapper[4769]: I0131 16:43:53.167622 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5905gjn9" event={"ID":"ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7","Type":"ContainerDied","Data":"395e03cc6ec17e1382ff8b0ad75d58bb571d1c8eeff9627fd546a9982d9b9cee"} Jan 31 16:43:54 crc kubenswrapper[4769]: I0131 16:43:54.502720 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5905gjn9" Jan 31 16:43:54 crc kubenswrapper[4769]: I0131 16:43:54.693144 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t9g8c\" (UniqueName: \"kubernetes.io/projected/ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7-kube-api-access-t9g8c\") pod \"ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7\" (UID: \"ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7\") " Jan 31 16:43:54 crc kubenswrapper[4769]: I0131 16:43:54.693225 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7-util\") pod \"ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7\" (UID: \"ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7\") " Jan 31 16:43:54 crc kubenswrapper[4769]: I0131 16:43:54.693273 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7-bundle\") pod \"ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7\" (UID: \"ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7\") " Jan 31 16:43:54 crc kubenswrapper[4769]: I0131 16:43:54.694138 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7-bundle" (OuterVolumeSpecName: "bundle") pod "ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7" (UID: "ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:43:54 crc kubenswrapper[4769]: I0131 16:43:54.703009 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7-kube-api-access-t9g8c" (OuterVolumeSpecName: "kube-api-access-t9g8c") pod "ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7" (UID: "ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7"). InnerVolumeSpecName "kube-api-access-t9g8c". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:43:54 crc kubenswrapper[4769]: I0131 16:43:54.719571 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7-util" (OuterVolumeSpecName: "util") pod "ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7" (UID: "ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:43:54 crc kubenswrapper[4769]: I0131 16:43:54.795199 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t9g8c\" (UniqueName: \"kubernetes.io/projected/ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7-kube-api-access-t9g8c\") on node \"crc\" DevicePath \"\"" Jan 31 16:43:54 crc kubenswrapper[4769]: I0131 16:43:54.795229 4769 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7-util\") on node \"crc\" DevicePath \"\"" Jan 31 16:43:54 crc kubenswrapper[4769]: I0131 16:43:54.795239 4769 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7-bundle\") on node \"crc\" DevicePath \"\"" Jan 31 16:43:55 crc kubenswrapper[4769]: I0131 16:43:55.185536 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5905gjn9" event={"ID":"ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7","Type":"ContainerDied","Data":"d0ae46b291a2289f20ad31d3c4b467c9b711fa402eedd4af90dbef8bda30478c"} Jan 31 16:43:55 crc kubenswrapper[4769]: I0131 16:43:55.185578 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d0ae46b291a2289f20ad31d3c4b467c9b711fa402eedd4af90dbef8bda30478c" Jan 31 16:43:55 crc kubenswrapper[4769]: I0131 16:43:55.185643 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5905gjn9" Jan 31 16:43:57 crc kubenswrapper[4769]: I0131 16:43:57.647359 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-nwfjq" Jan 31 16:43:57 crc kubenswrapper[4769]: I0131 16:43:57.647427 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-nwfjq" Jan 31 16:43:57 crc kubenswrapper[4769]: I0131 16:43:57.716594 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-nwfjq" Jan 31 16:43:58 crc kubenswrapper[4769]: I0131 16:43:58.274555 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-nwfjq" Jan 31 16:44:01 crc kubenswrapper[4769]: I0131 16:44:01.308824 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-nwfjq"] Jan 31 16:44:01 crc kubenswrapper[4769]: I0131 16:44:01.309105 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-nwfjq" podUID="f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9" containerName="registry-server" containerID="cri-o://e5410bca71522dccf059042ef7dea37c3769434328cf82ab8e27d25ada2d5308" gracePeriod=2 Jan 31 16:44:01 crc kubenswrapper[4769]: I0131 16:44:01.804537 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nwfjq" Jan 31 16:44:01 crc kubenswrapper[4769]: I0131 16:44:01.995347 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9-catalog-content\") pod \"f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9\" (UID: \"f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9\") " Jan 31 16:44:01 crc kubenswrapper[4769]: I0131 16:44:01.995436 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tfdfp\" (UniqueName: \"kubernetes.io/projected/f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9-kube-api-access-tfdfp\") pod \"f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9\" (UID: \"f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9\") " Jan 31 16:44:01 crc kubenswrapper[4769]: I0131 16:44:01.995487 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9-utilities\") pod \"f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9\" (UID: \"f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9\") " Jan 31 16:44:01 crc kubenswrapper[4769]: I0131 16:44:01.997096 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9-utilities" (OuterVolumeSpecName: "utilities") pod "f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9" (UID: "f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:44:02 crc kubenswrapper[4769]: I0131 16:44:02.005283 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9-kube-api-access-tfdfp" (OuterVolumeSpecName: "kube-api-access-tfdfp") pod "f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9" (UID: "f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9"). InnerVolumeSpecName "kube-api-access-tfdfp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:44:02 crc kubenswrapper[4769]: I0131 16:44:02.042842 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9" (UID: "f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:44:02 crc kubenswrapper[4769]: I0131 16:44:02.097221 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tfdfp\" (UniqueName: \"kubernetes.io/projected/f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9-kube-api-access-tfdfp\") on node \"crc\" DevicePath \"\"" Jan 31 16:44:02 crc kubenswrapper[4769]: I0131 16:44:02.097262 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9-utilities\") on node \"crc\" DevicePath \"\"" Jan 31 16:44:02 crc kubenswrapper[4769]: I0131 16:44:02.097275 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 31 16:44:02 crc kubenswrapper[4769]: I0131 16:44:02.233881 4769 generic.go:334] "Generic (PLEG): container finished" podID="f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9" containerID="e5410bca71522dccf059042ef7dea37c3769434328cf82ab8e27d25ada2d5308" exitCode=0 Jan 31 16:44:02 crc kubenswrapper[4769]: I0131 16:44:02.233941 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nwfjq" event={"ID":"f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9","Type":"ContainerDied","Data":"e5410bca71522dccf059042ef7dea37c3769434328cf82ab8e27d25ada2d5308"} Jan 31 16:44:02 crc kubenswrapper[4769]: I0131 16:44:02.233983 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nwfjq" event={"ID":"f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9","Type":"ContainerDied","Data":"b85ac876138ed9adc565c97e15fa8a9662dd7b1c1cf9a93b95f0fa33a0268d13"} Jan 31 16:44:02 crc kubenswrapper[4769]: I0131 16:44:02.234010 4769 scope.go:117] "RemoveContainer" containerID="e5410bca71522dccf059042ef7dea37c3769434328cf82ab8e27d25ada2d5308" Jan 31 16:44:02 crc kubenswrapper[4769]: I0131 16:44:02.234012 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nwfjq" Jan 31 16:44:02 crc kubenswrapper[4769]: I0131 16:44:02.274783 4769 scope.go:117] "RemoveContainer" containerID="5608152b6b5ab58c3d871677c070ffbad5c10ae2cd89a608eee1c6ed4e50b03f" Jan 31 16:44:02 crc kubenswrapper[4769]: I0131 16:44:02.282776 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-nwfjq"] Jan 31 16:44:02 crc kubenswrapper[4769]: I0131 16:44:02.303070 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-nwfjq"] Jan 31 16:44:02 crc kubenswrapper[4769]: I0131 16:44:02.304549 4769 scope.go:117] "RemoveContainer" containerID="7b97dfdb286acade5816f6344d9b0f27b6ec847c2aae05c95d4df635f23f3a4b" Jan 31 16:44:02 crc kubenswrapper[4769]: I0131 16:44:02.336463 4769 scope.go:117] "RemoveContainer" containerID="e5410bca71522dccf059042ef7dea37c3769434328cf82ab8e27d25ada2d5308" Jan 31 16:44:02 crc kubenswrapper[4769]: E0131 16:44:02.336998 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e5410bca71522dccf059042ef7dea37c3769434328cf82ab8e27d25ada2d5308\": container with ID starting with e5410bca71522dccf059042ef7dea37c3769434328cf82ab8e27d25ada2d5308 not found: ID does not exist" containerID="e5410bca71522dccf059042ef7dea37c3769434328cf82ab8e27d25ada2d5308" Jan 31 16:44:02 crc kubenswrapper[4769]: I0131 16:44:02.337055 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e5410bca71522dccf059042ef7dea37c3769434328cf82ab8e27d25ada2d5308"} err="failed to get container status \"e5410bca71522dccf059042ef7dea37c3769434328cf82ab8e27d25ada2d5308\": rpc error: code = NotFound desc = could not find container \"e5410bca71522dccf059042ef7dea37c3769434328cf82ab8e27d25ada2d5308\": container with ID starting with e5410bca71522dccf059042ef7dea37c3769434328cf82ab8e27d25ada2d5308 not found: ID does not exist" Jan 31 16:44:02 crc kubenswrapper[4769]: I0131 16:44:02.337091 4769 scope.go:117] "RemoveContainer" containerID="5608152b6b5ab58c3d871677c070ffbad5c10ae2cd89a608eee1c6ed4e50b03f" Jan 31 16:44:02 crc kubenswrapper[4769]: E0131 16:44:02.337925 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5608152b6b5ab58c3d871677c070ffbad5c10ae2cd89a608eee1c6ed4e50b03f\": container with ID starting with 5608152b6b5ab58c3d871677c070ffbad5c10ae2cd89a608eee1c6ed4e50b03f not found: ID does not exist" containerID="5608152b6b5ab58c3d871677c070ffbad5c10ae2cd89a608eee1c6ed4e50b03f" Jan 31 16:44:02 crc kubenswrapper[4769]: I0131 16:44:02.337967 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5608152b6b5ab58c3d871677c070ffbad5c10ae2cd89a608eee1c6ed4e50b03f"} err="failed to get container status \"5608152b6b5ab58c3d871677c070ffbad5c10ae2cd89a608eee1c6ed4e50b03f\": rpc error: code = NotFound desc = could not find container \"5608152b6b5ab58c3d871677c070ffbad5c10ae2cd89a608eee1c6ed4e50b03f\": container with ID starting with 5608152b6b5ab58c3d871677c070ffbad5c10ae2cd89a608eee1c6ed4e50b03f not found: ID does not exist" Jan 31 16:44:02 crc kubenswrapper[4769]: I0131 16:44:02.338002 4769 scope.go:117] "RemoveContainer" containerID="7b97dfdb286acade5816f6344d9b0f27b6ec847c2aae05c95d4df635f23f3a4b" Jan 31 16:44:02 crc kubenswrapper[4769]: E0131 16:44:02.338425 4769 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"7b97dfdb286acade5816f6344d9b0f27b6ec847c2aae05c95d4df635f23f3a4b\": container with ID starting with 7b97dfdb286acade5816f6344d9b0f27b6ec847c2aae05c95d4df635f23f3a4b not found: ID does not exist" containerID="7b97dfdb286acade5816f6344d9b0f27b6ec847c2aae05c95d4df635f23f3a4b" Jan 31 16:44:02 crc kubenswrapper[4769]: I0131 16:44:02.338464 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7b97dfdb286acade5816f6344d9b0f27b6ec847c2aae05c95d4df635f23f3a4b"} err="failed to get container status \"7b97dfdb286acade5816f6344d9b0f27b6ec847c2aae05c95d4df635f23f3a4b\": rpc error: code = NotFound desc = could not find container \"7b97dfdb286acade5816f6344d9b0f27b6ec847c2aae05c95d4df635f23f3a4b\": container with ID starting with 7b97dfdb286acade5816f6344d9b0f27b6ec847c2aae05c95d4df635f23f3a4b not found: ID does not exist" Jan 31 16:44:02 crc kubenswrapper[4769]: I0131 16:44:02.720945 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9" path="/var/lib/kubelet/pods/f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9/volumes" Jan 31 16:44:12 crc kubenswrapper[4769]: I0131 16:44:12.745941 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-779fc9694b-kf6dn"] Jan 31 16:44:12 crc kubenswrapper[4769]: E0131 16:44:12.746794 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9" containerName="registry-server" Jan 31 16:44:12 crc kubenswrapper[4769]: I0131 16:44:12.746809 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9" containerName="registry-server" Jan 31 16:44:12 crc kubenswrapper[4769]: E0131 16:44:12.746825 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="879a5e19-b209-44ef-b322-46f785564385" containerName="registry-server" Jan 31 16:44:12 crc kubenswrapper[4769]: I0131 16:44:12.746833 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="879a5e19-b209-44ef-b322-46f785564385" containerName="registry-server" Jan 31 16:44:12 crc kubenswrapper[4769]: E0131 16:44:12.746841 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7" containerName="pull" Jan 31 16:44:12 crc kubenswrapper[4769]: I0131 16:44:12.746849 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7" containerName="pull" Jan 31 16:44:12 crc kubenswrapper[4769]: E0131 16:44:12.746864 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7" containerName="util" Jan 31 16:44:12 crc kubenswrapper[4769]: I0131 16:44:12.746872 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7" containerName="util" Jan 31 16:44:12 crc kubenswrapper[4769]: E0131 16:44:12.746886 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="879a5e19-b209-44ef-b322-46f785564385" containerName="extract-utilities" Jan 31 16:44:12 crc kubenswrapper[4769]: I0131 16:44:12.746893 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="879a5e19-b209-44ef-b322-46f785564385" containerName="extract-utilities" Jan 31 16:44:12 crc kubenswrapper[4769]: E0131 16:44:12.746904 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="879a5e19-b209-44ef-b322-46f785564385" containerName="extract-content" Jan 31 16:44:12 crc 
kubenswrapper[4769]: I0131 16:44:12.746912 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="879a5e19-b209-44ef-b322-46f785564385" containerName="extract-content" Jan 31 16:44:12 crc kubenswrapper[4769]: E0131 16:44:12.746925 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9" containerName="extract-utilities" Jan 31 16:44:12 crc kubenswrapper[4769]: I0131 16:44:12.746933 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9" containerName="extract-utilities" Jan 31 16:44:12 crc kubenswrapper[4769]: E0131 16:44:12.746948 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9" containerName="extract-content" Jan 31 16:44:12 crc kubenswrapper[4769]: I0131 16:44:12.746956 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9" containerName="extract-content" Jan 31 16:44:12 crc kubenswrapper[4769]: E0131 16:44:12.746977 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7" containerName="extract" Jan 31 16:44:12 crc kubenswrapper[4769]: I0131 16:44:12.746985 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7" containerName="extract" Jan 31 16:44:12 crc kubenswrapper[4769]: I0131 16:44:12.747114 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7" containerName="extract" Jan 31 16:44:12 crc kubenswrapper[4769]: I0131 16:44:12.747130 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="f7ec2f2b-ab4e-4bf7-bf24-fd89ebf02ec9" containerName="registry-server" Jan 31 16:44:12 crc kubenswrapper[4769]: I0131 16:44:12.747144 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="879a5e19-b209-44ef-b322-46f785564385" containerName="registry-server" Jan 31 16:44:12 crc kubenswrapper[4769]: I0131 16:44:12.747682 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-kf6dn" Jan 31 16:44:12 crc kubenswrapper[4769]: I0131 16:44:12.757655 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-dockercfg-jdj6b" Jan 31 16:44:12 crc kubenswrapper[4769]: I0131 16:44:12.764691 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-779fc9694b-kf6dn"] Jan 31 16:44:12 crc kubenswrapper[4769]: I0131 16:44:12.850610 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sn552\" (UniqueName: \"kubernetes.io/projected/729bbda5-c237-4a8e-abc9-a80b755e1cd1-kube-api-access-sn552\") pod \"rabbitmq-cluster-operator-779fc9694b-kf6dn\" (UID: \"729bbda5-c237-4a8e-abc9-a80b755e1cd1\") " pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-kf6dn" Jan 31 16:44:12 crc kubenswrapper[4769]: I0131 16:44:12.952214 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sn552\" (UniqueName: \"kubernetes.io/projected/729bbda5-c237-4a8e-abc9-a80b755e1cd1-kube-api-access-sn552\") pod \"rabbitmq-cluster-operator-779fc9694b-kf6dn\" (UID: \"729bbda5-c237-4a8e-abc9-a80b755e1cd1\") " pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-kf6dn" Jan 31 16:44:12 crc kubenswrapper[4769]: I0131 16:44:12.976003 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sn552\" (UniqueName: \"kubernetes.io/projected/729bbda5-c237-4a8e-abc9-a80b755e1cd1-kube-api-access-sn552\") pod \"rabbitmq-cluster-operator-779fc9694b-kf6dn\" (UID: \"729bbda5-c237-4a8e-abc9-a80b755e1cd1\") " pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-kf6dn" Jan 31 16:44:13 crc kubenswrapper[4769]: I0131 16:44:13.089197 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-kf6dn" Jan 31 16:44:13 crc kubenswrapper[4769]: I0131 16:44:13.604854 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-779fc9694b-kf6dn"] Jan 31 16:44:13 crc kubenswrapper[4769]: W0131 16:44:13.611525 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod729bbda5_c237_4a8e_abc9_a80b755e1cd1.slice/crio-eecc4a0d245c7332d1b1f92e862063da3db086278e81865b8f6ca108f14a2040 WatchSource:0}: Error finding container eecc4a0d245c7332d1b1f92e862063da3db086278e81865b8f6ca108f14a2040: Status 404 returned error can't find the container with id eecc4a0d245c7332d1b1f92e862063da3db086278e81865b8f6ca108f14a2040 Jan 31 16:44:14 crc kubenswrapper[4769]: I0131 16:44:14.324815 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-kf6dn" event={"ID":"729bbda5-c237-4a8e-abc9-a80b755e1cd1","Type":"ContainerStarted","Data":"eecc4a0d245c7332d1b1f92e862063da3db086278e81865b8f6ca108f14a2040"} Jan 31 16:44:17 crc kubenswrapper[4769]: I0131 16:44:17.941215 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-kf6dn" event={"ID":"729bbda5-c237-4a8e-abc9-a80b755e1cd1","Type":"ContainerStarted","Data":"e5c0f1b62943f8e944fb272f39466be5c363fd341a8ea168892067c1bab751e8"} Jan 31 16:44:17 crc kubenswrapper[4769]: I0131 16:44:17.956193 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-kf6dn" podStartSLOduration=2.196413217 podStartE2EDuration="5.956172156s" podCreationTimestamp="2026-01-31 16:44:12 +0000 UTC" firstStartedPulling="2026-01-31 16:44:13.614787328 +0000 UTC m=+901.688956007" lastFinishedPulling="2026-01-31 16:44:17.374546267 +0000 UTC m=+905.448714946" observedRunningTime="2026-01-31 16:44:17.954706707 +0000 UTC m=+906.028875396" watchObservedRunningTime="2026-01-31 16:44:17.956172156 +0000 UTC m=+906.030340835" Jan 31 16:44:20 crc kubenswrapper[4769]: I0131 16:44:20.682297 4769 patch_prober.go:28] interesting pod/machine-config-daemon-4bqbm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 31 16:44:20 crc kubenswrapper[4769]: I0131 16:44:20.682585 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 31 16:44:22 crc kubenswrapper[4769]: I0131 16:44:22.695771 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/rabbitmq-server-0"] Jan 31 16:44:22 crc kubenswrapper[4769]: I0131 16:44:22.697757 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/rabbitmq-server-0" Jan 31 16:44:22 crc kubenswrapper[4769]: I0131 16:44:22.702626 4769 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"rabbitmq-erlang-cookie" Jan 31 16:44:22 crc kubenswrapper[4769]: I0131 16:44:22.704528 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"rabbitmq-server-conf" Jan 31 16:44:22 crc kubenswrapper[4769]: I0131 16:44:22.704680 4769 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"rabbitmq-default-user" Jan 31 16:44:22 crc kubenswrapper[4769]: I0131 16:44:22.704753 4769 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"rabbitmq-server-dockercfg-jfmpk" Jan 31 16:44:22 crc kubenswrapper[4769]: I0131 16:44:22.705358 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"rabbitmq-plugins-conf" Jan 31 16:44:22 crc kubenswrapper[4769]: I0131 16:44:22.719945 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/rabbitmq-server-0"] Jan 31 16:44:22 crc kubenswrapper[4769]: I0131 16:44:22.839890 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/99300305-94b5-426d-a930-a4420fc775d7-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"99300305-94b5-426d-a930-a4420fc775d7\") " pod="swift-kuttl-tests/rabbitmq-server-0" Jan 31 16:44:22 crc kubenswrapper[4769]: I0131 16:44:22.839933 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jbwvq\" (UniqueName: \"kubernetes.io/projected/99300305-94b5-426d-a930-a4420fc775d7-kube-api-access-jbwvq\") pod \"rabbitmq-server-0\" (UID: \"99300305-94b5-426d-a930-a4420fc775d7\") " pod="swift-kuttl-tests/rabbitmq-server-0" Jan 31 16:44:22 crc kubenswrapper[4769]: I0131 16:44:22.839959 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/99300305-94b5-426d-a930-a4420fc775d7-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"99300305-94b5-426d-a930-a4420fc775d7\") " pod="swift-kuttl-tests/rabbitmq-server-0" Jan 31 16:44:22 crc kubenswrapper[4769]: I0131 16:44:22.839979 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-72e35d53-4e9d-4ef3-9937-a2bb16453e1d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-72e35d53-4e9d-4ef3-9937-a2bb16453e1d\") pod \"rabbitmq-server-0\" (UID: \"99300305-94b5-426d-a930-a4420fc775d7\") " pod="swift-kuttl-tests/rabbitmq-server-0" Jan 31 16:44:22 crc kubenswrapper[4769]: I0131 16:44:22.840019 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/99300305-94b5-426d-a930-a4420fc775d7-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"99300305-94b5-426d-a930-a4420fc775d7\") " pod="swift-kuttl-tests/rabbitmq-server-0" Jan 31 16:44:22 crc kubenswrapper[4769]: I0131 16:44:22.840036 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/99300305-94b5-426d-a930-a4420fc775d7-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"99300305-94b5-426d-a930-a4420fc775d7\") " pod="swift-kuttl-tests/rabbitmq-server-0" 
Jan 31 16:44:22 crc kubenswrapper[4769]: I0131 16:44:22.840162 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/99300305-94b5-426d-a930-a4420fc775d7-pod-info\") pod \"rabbitmq-server-0\" (UID: \"99300305-94b5-426d-a930-a4420fc775d7\") " pod="swift-kuttl-tests/rabbitmq-server-0" Jan 31 16:44:22 crc kubenswrapper[4769]: I0131 16:44:22.840184 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/99300305-94b5-426d-a930-a4420fc775d7-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"99300305-94b5-426d-a930-a4420fc775d7\") " pod="swift-kuttl-tests/rabbitmq-server-0" Jan 31 16:44:22 crc kubenswrapper[4769]: I0131 16:44:22.941991 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/99300305-94b5-426d-a930-a4420fc775d7-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"99300305-94b5-426d-a930-a4420fc775d7\") " pod="swift-kuttl-tests/rabbitmq-server-0" Jan 31 16:44:22 crc kubenswrapper[4769]: I0131 16:44:22.942064 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jbwvq\" (UniqueName: \"kubernetes.io/projected/99300305-94b5-426d-a930-a4420fc775d7-kube-api-access-jbwvq\") pod \"rabbitmq-server-0\" (UID: \"99300305-94b5-426d-a930-a4420fc775d7\") " pod="swift-kuttl-tests/rabbitmq-server-0" Jan 31 16:44:22 crc kubenswrapper[4769]: I0131 16:44:22.942111 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/99300305-94b5-426d-a930-a4420fc775d7-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"99300305-94b5-426d-a930-a4420fc775d7\") " pod="swift-kuttl-tests/rabbitmq-server-0" Jan 31 16:44:22 crc kubenswrapper[4769]: I0131 16:44:22.942143 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-72e35d53-4e9d-4ef3-9937-a2bb16453e1d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-72e35d53-4e9d-4ef3-9937-a2bb16453e1d\") pod \"rabbitmq-server-0\" (UID: \"99300305-94b5-426d-a930-a4420fc775d7\") " pod="swift-kuttl-tests/rabbitmq-server-0" Jan 31 16:44:22 crc kubenswrapper[4769]: I0131 16:44:22.942243 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/99300305-94b5-426d-a930-a4420fc775d7-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"99300305-94b5-426d-a930-a4420fc775d7\") " pod="swift-kuttl-tests/rabbitmq-server-0" Jan 31 16:44:22 crc kubenswrapper[4769]: I0131 16:44:22.942265 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/99300305-94b5-426d-a930-a4420fc775d7-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"99300305-94b5-426d-a930-a4420fc775d7\") " pod="swift-kuttl-tests/rabbitmq-server-0" Jan 31 16:44:22 crc kubenswrapper[4769]: I0131 16:44:22.942296 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/99300305-94b5-426d-a930-a4420fc775d7-pod-info\") pod \"rabbitmq-server-0\" (UID: \"99300305-94b5-426d-a930-a4420fc775d7\") " pod="swift-kuttl-tests/rabbitmq-server-0" Jan 31 16:44:22 crc kubenswrapper[4769]: I0131 16:44:22.942337 4769 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/99300305-94b5-426d-a930-a4420fc775d7-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"99300305-94b5-426d-a930-a4420fc775d7\") " pod="swift-kuttl-tests/rabbitmq-server-0" Jan 31 16:44:22 crc kubenswrapper[4769]: I0131 16:44:22.942433 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/99300305-94b5-426d-a930-a4420fc775d7-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"99300305-94b5-426d-a930-a4420fc775d7\") " pod="swift-kuttl-tests/rabbitmq-server-0" Jan 31 16:44:22 crc kubenswrapper[4769]: I0131 16:44:22.942807 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/99300305-94b5-426d-a930-a4420fc775d7-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"99300305-94b5-426d-a930-a4420fc775d7\") " pod="swift-kuttl-tests/rabbitmq-server-0" Jan 31 16:44:22 crc kubenswrapper[4769]: I0131 16:44:22.943446 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/99300305-94b5-426d-a930-a4420fc775d7-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"99300305-94b5-426d-a930-a4420fc775d7\") " pod="swift-kuttl-tests/rabbitmq-server-0" Jan 31 16:44:22 crc kubenswrapper[4769]: I0131 16:44:22.945527 4769 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 31 16:44:22 crc kubenswrapper[4769]: I0131 16:44:22.945551 4769 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-72e35d53-4e9d-4ef3-9937-a2bb16453e1d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-72e35d53-4e9d-4ef3-9937-a2bb16453e1d\") pod \"rabbitmq-server-0\" (UID: \"99300305-94b5-426d-a930-a4420fc775d7\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/c31b94d74ca617b00df001301a29a56b483be1bc408b9b5fe85a0260db9313ad/globalmount\"" pod="swift-kuttl-tests/rabbitmq-server-0" Jan 31 16:44:22 crc kubenswrapper[4769]: I0131 16:44:22.948755 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/99300305-94b5-426d-a930-a4420fc775d7-pod-info\") pod \"rabbitmq-server-0\" (UID: \"99300305-94b5-426d-a930-a4420fc775d7\") " pod="swift-kuttl-tests/rabbitmq-server-0" Jan 31 16:44:22 crc kubenswrapper[4769]: I0131 16:44:22.948932 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/99300305-94b5-426d-a930-a4420fc775d7-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"99300305-94b5-426d-a930-a4420fc775d7\") " pod="swift-kuttl-tests/rabbitmq-server-0" Jan 31 16:44:22 crc kubenswrapper[4769]: I0131 16:44:22.951015 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/99300305-94b5-426d-a930-a4420fc775d7-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"99300305-94b5-426d-a930-a4420fc775d7\") " pod="swift-kuttl-tests/rabbitmq-server-0" Jan 31 16:44:22 crc kubenswrapper[4769]: I0131 16:44:22.962939 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jbwvq\" (UniqueName: 
\"kubernetes.io/projected/99300305-94b5-426d-a930-a4420fc775d7-kube-api-access-jbwvq\") pod \"rabbitmq-server-0\" (UID: \"99300305-94b5-426d-a930-a4420fc775d7\") " pod="swift-kuttl-tests/rabbitmq-server-0" Jan 31 16:44:22 crc kubenswrapper[4769]: I0131 16:44:22.983300 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-72e35d53-4e9d-4ef3-9937-a2bb16453e1d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-72e35d53-4e9d-4ef3-9937-a2bb16453e1d\") pod \"rabbitmq-server-0\" (UID: \"99300305-94b5-426d-a930-a4420fc775d7\") " pod="swift-kuttl-tests/rabbitmq-server-0" Jan 31 16:44:23 crc kubenswrapper[4769]: I0131 16:44:23.016093 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/rabbitmq-server-0" Jan 31 16:44:23 crc kubenswrapper[4769]: I0131 16:44:23.288176 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/rabbitmq-server-0"] Jan 31 16:44:23 crc kubenswrapper[4769]: W0131 16:44:23.302006 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod99300305_94b5_426d_a930_a4420fc775d7.slice/crio-c34a381807ed85686386cd0d1b71d0c1653c1c11d60691d61c1bc0a106c8d86a WatchSource:0}: Error finding container c34a381807ed85686386cd0d1b71d0c1653c1c11d60691d61c1bc0a106c8d86a: Status 404 returned error can't find the container with id c34a381807ed85686386cd0d1b71d0c1653c1c11d60691d61c1bc0a106c8d86a Jan 31 16:44:23 crc kubenswrapper[4769]: I0131 16:44:23.983870 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/rabbitmq-server-0" event={"ID":"99300305-94b5-426d-a930-a4420fc775d7","Type":"ContainerStarted","Data":"c34a381807ed85686386cd0d1b71d0c1653c1c11d60691d61c1bc0a106c8d86a"} Jan 31 16:44:24 crc kubenswrapper[4769]: I0131 16:44:24.323675 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-index-pp4vk"] Jan 31 16:44:24 crc kubenswrapper[4769]: I0131 16:44:24.324563 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-index-pp4vk" Jan 31 16:44:24 crc kubenswrapper[4769]: I0131 16:44:24.326427 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-index-dockercfg-w2sn6" Jan 31 16:44:24 crc kubenswrapper[4769]: I0131 16:44:24.336210 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-index-pp4vk"] Jan 31 16:44:24 crc kubenswrapper[4769]: I0131 16:44:24.464034 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j5ncj\" (UniqueName: \"kubernetes.io/projected/a2a60854-2368-418f-904d-108236556cfd-kube-api-access-j5ncj\") pod \"keystone-operator-index-pp4vk\" (UID: \"a2a60854-2368-418f-904d-108236556cfd\") " pod="openstack-operators/keystone-operator-index-pp4vk" Jan 31 16:44:24 crc kubenswrapper[4769]: I0131 16:44:24.565524 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j5ncj\" (UniqueName: \"kubernetes.io/projected/a2a60854-2368-418f-904d-108236556cfd-kube-api-access-j5ncj\") pod \"keystone-operator-index-pp4vk\" (UID: \"a2a60854-2368-418f-904d-108236556cfd\") " pod="openstack-operators/keystone-operator-index-pp4vk" Jan 31 16:44:24 crc kubenswrapper[4769]: I0131 16:44:24.586173 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j5ncj\" (UniqueName: \"kubernetes.io/projected/a2a60854-2368-418f-904d-108236556cfd-kube-api-access-j5ncj\") pod \"keystone-operator-index-pp4vk\" (UID: \"a2a60854-2368-418f-904d-108236556cfd\") " pod="openstack-operators/keystone-operator-index-pp4vk" Jan 31 16:44:24 crc kubenswrapper[4769]: I0131 16:44:24.688468 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-index-pp4vk" Jan 31 16:44:25 crc kubenswrapper[4769]: I0131 16:44:25.067749 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-index-pp4vk"] Jan 31 16:44:25 crc kubenswrapper[4769]: I0131 16:44:25.997582 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-index-pp4vk" event={"ID":"a2a60854-2368-418f-904d-108236556cfd","Type":"ContainerStarted","Data":"3d5d9d5d88873f45646307e1fb66ae62c7a7b2cd52e71b4aece548f74b7dbaef"} Jan 31 16:44:28 crc kubenswrapper[4769]: I0131 16:44:28.011808 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-index-pp4vk" event={"ID":"a2a60854-2368-418f-904d-108236556cfd","Type":"ContainerStarted","Data":"64d73bc747c44bb74f6391e02cf3a48f275870961a94180fdb95a20f5ee75984"} Jan 31 16:44:28 crc kubenswrapper[4769]: I0131 16:44:28.030548 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-index-pp4vk" podStartSLOduration=1.872066062 podStartE2EDuration="4.03051674s" podCreationTimestamp="2026-01-31 16:44:24 +0000 UTC" firstStartedPulling="2026-01-31 16:44:25.084724307 +0000 UTC m=+913.158892976" lastFinishedPulling="2026-01-31 16:44:27.243174985 +0000 UTC m=+915.317343654" observedRunningTime="2026-01-31 16:44:28.023615505 +0000 UTC m=+916.097784194" watchObservedRunningTime="2026-01-31 16:44:28.03051674 +0000 UTC m=+916.104685419" Jan 31 16:44:32 crc kubenswrapper[4769]: I0131 16:44:32.039325 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/rabbitmq-server-0" event={"ID":"99300305-94b5-426d-a930-a4420fc775d7","Type":"ContainerStarted","Data":"c094a72b66659b1ed5046d32f6e8dbc270fc663565c48a5a4bd3343756c90925"} Jan 31 16:44:34 crc kubenswrapper[4769]: I0131 16:44:34.688881 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/keystone-operator-index-pp4vk" Jan 31 16:44:34 crc kubenswrapper[4769]: I0131 16:44:34.689270 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-index-pp4vk" Jan 31 16:44:34 crc kubenswrapper[4769]: I0131 16:44:34.724700 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/keystone-operator-index-pp4vk" Jan 31 16:44:35 crc kubenswrapper[4769]: I0131 16:44:35.097060 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-index-pp4vk" Jan 31 16:44:41 crc kubenswrapper[4769]: I0131 16:44:41.353942 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef4fg77"] Jan 31 16:44:41 crc kubenswrapper[4769]: I0131 16:44:41.356356 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef4fg77" Jan 31 16:44:41 crc kubenswrapper[4769]: I0131 16:44:41.358089 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-x6tzc" Jan 31 16:44:41 crc kubenswrapper[4769]: I0131 16:44:41.367838 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef4fg77"] Jan 31 16:44:41 crc kubenswrapper[4769]: I0131 16:44:41.510968 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/607e4223-7a52-479b-9347-1c5e55698bcc-bundle\") pod \"b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef4fg77\" (UID: \"607e4223-7a52-479b-9347-1c5e55698bcc\") " pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef4fg77" Jan 31 16:44:41 crc kubenswrapper[4769]: I0131 16:44:41.511099 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lhsgf\" (UniqueName: \"kubernetes.io/projected/607e4223-7a52-479b-9347-1c5e55698bcc-kube-api-access-lhsgf\") pod \"b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef4fg77\" (UID: \"607e4223-7a52-479b-9347-1c5e55698bcc\") " pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef4fg77" Jan 31 16:44:41 crc kubenswrapper[4769]: I0131 16:44:41.511173 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/607e4223-7a52-479b-9347-1c5e55698bcc-util\") pod \"b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef4fg77\" (UID: \"607e4223-7a52-479b-9347-1c5e55698bcc\") " pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef4fg77" Jan 31 16:44:41 crc kubenswrapper[4769]: I0131 16:44:41.613020 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/607e4223-7a52-479b-9347-1c5e55698bcc-bundle\") pod \"b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef4fg77\" (UID: \"607e4223-7a52-479b-9347-1c5e55698bcc\") " pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef4fg77" Jan 31 16:44:41 crc kubenswrapper[4769]: I0131 16:44:41.613431 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/607e4223-7a52-479b-9347-1c5e55698bcc-bundle\") pod \"b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef4fg77\" (UID: \"607e4223-7a52-479b-9347-1c5e55698bcc\") " pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef4fg77" Jan 31 16:44:41 crc kubenswrapper[4769]: I0131 16:44:41.613626 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lhsgf\" (UniqueName: \"kubernetes.io/projected/607e4223-7a52-479b-9347-1c5e55698bcc-kube-api-access-lhsgf\") pod \"b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef4fg77\" (UID: \"607e4223-7a52-479b-9347-1c5e55698bcc\") " pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef4fg77" Jan 31 16:44:41 crc kubenswrapper[4769]: I0131 16:44:41.613812 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/607e4223-7a52-479b-9347-1c5e55698bcc-util\") pod \"b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef4fg77\" (UID: \"607e4223-7a52-479b-9347-1c5e55698bcc\") " pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef4fg77" Jan 31 16:44:41 crc kubenswrapper[4769]: I0131 16:44:41.614245 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/607e4223-7a52-479b-9347-1c5e55698bcc-util\") pod \"b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef4fg77\" (UID: \"607e4223-7a52-479b-9347-1c5e55698bcc\") " pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef4fg77" Jan 31 16:44:41 crc kubenswrapper[4769]: I0131 16:44:41.638934 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lhsgf\" (UniqueName: \"kubernetes.io/projected/607e4223-7a52-479b-9347-1c5e55698bcc-kube-api-access-lhsgf\") pod \"b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef4fg77\" (UID: \"607e4223-7a52-479b-9347-1c5e55698bcc\") " pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef4fg77" Jan 31 16:44:41 crc kubenswrapper[4769]: I0131 16:44:41.673845 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef4fg77" Jan 31 16:44:42 crc kubenswrapper[4769]: I0131 16:44:42.132958 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef4fg77"] Jan 31 16:44:43 crc kubenswrapper[4769]: I0131 16:44:43.126231 4769 generic.go:334] "Generic (PLEG): container finished" podID="607e4223-7a52-479b-9347-1c5e55698bcc" containerID="818b1ecbcc6775a0f5fcb1f7948d54a2bc00500e44963a01456797b7bf3ce46c" exitCode=0 Jan 31 16:44:43 crc kubenswrapper[4769]: I0131 16:44:43.126362 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef4fg77" event={"ID":"607e4223-7a52-479b-9347-1c5e55698bcc","Type":"ContainerDied","Data":"818b1ecbcc6775a0f5fcb1f7948d54a2bc00500e44963a01456797b7bf3ce46c"} Jan 31 16:44:43 crc kubenswrapper[4769]: I0131 16:44:43.126739 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef4fg77" event={"ID":"607e4223-7a52-479b-9347-1c5e55698bcc","Type":"ContainerStarted","Data":"d153ae93865ea590409cadedaa1a8f4b6fcd3753b8085fabbf3379894bbe13e6"} Jan 31 16:44:44 crc kubenswrapper[4769]: I0131 16:44:44.135259 4769 generic.go:334] "Generic (PLEG): container finished" podID="607e4223-7a52-479b-9347-1c5e55698bcc" containerID="e66830fd7dc7c020fb197f3cdb62abacd1e9db6dff851b9daf7030eff38429f7" exitCode=0 Jan 31 16:44:44 crc kubenswrapper[4769]: I0131 16:44:44.135318 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef4fg77" event={"ID":"607e4223-7a52-479b-9347-1c5e55698bcc","Type":"ContainerDied","Data":"e66830fd7dc7c020fb197f3cdb62abacd1e9db6dff851b9daf7030eff38429f7"} Jan 31 16:44:45 crc kubenswrapper[4769]: I0131 16:44:45.144640 4769 generic.go:334] "Generic (PLEG): container finished" podID="607e4223-7a52-479b-9347-1c5e55698bcc" containerID="eb11ff984227528ea85d4e5398bea0d4823e743d64a5c708680669f8598ee3e1" exitCode=0 Jan 31 16:44:45 crc kubenswrapper[4769]: I0131 16:44:45.144750 4769 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef4fg77" event={"ID":"607e4223-7a52-479b-9347-1c5e55698bcc","Type":"ContainerDied","Data":"eb11ff984227528ea85d4e5398bea0d4823e743d64a5c708680669f8598ee3e1"} Jan 31 16:44:46 crc kubenswrapper[4769]: I0131 16:44:46.484841 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef4fg77" Jan 31 16:44:46 crc kubenswrapper[4769]: I0131 16:44:46.583995 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/607e4223-7a52-479b-9347-1c5e55698bcc-util\") pod \"607e4223-7a52-479b-9347-1c5e55698bcc\" (UID: \"607e4223-7a52-479b-9347-1c5e55698bcc\") " Jan 31 16:44:46 crc kubenswrapper[4769]: I0131 16:44:46.584038 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lhsgf\" (UniqueName: \"kubernetes.io/projected/607e4223-7a52-479b-9347-1c5e55698bcc-kube-api-access-lhsgf\") pod \"607e4223-7a52-479b-9347-1c5e55698bcc\" (UID: \"607e4223-7a52-479b-9347-1c5e55698bcc\") " Jan 31 16:44:46 crc kubenswrapper[4769]: I0131 16:44:46.584102 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/607e4223-7a52-479b-9347-1c5e55698bcc-bundle\") pod \"607e4223-7a52-479b-9347-1c5e55698bcc\" (UID: \"607e4223-7a52-479b-9347-1c5e55698bcc\") " Jan 31 16:44:46 crc kubenswrapper[4769]: I0131 16:44:46.585429 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/607e4223-7a52-479b-9347-1c5e55698bcc-bundle" (OuterVolumeSpecName: "bundle") pod "607e4223-7a52-479b-9347-1c5e55698bcc" (UID: "607e4223-7a52-479b-9347-1c5e55698bcc"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:44:46 crc kubenswrapper[4769]: I0131 16:44:46.592728 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/607e4223-7a52-479b-9347-1c5e55698bcc-kube-api-access-lhsgf" (OuterVolumeSpecName: "kube-api-access-lhsgf") pod "607e4223-7a52-479b-9347-1c5e55698bcc" (UID: "607e4223-7a52-479b-9347-1c5e55698bcc"). InnerVolumeSpecName "kube-api-access-lhsgf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:44:46 crc kubenswrapper[4769]: I0131 16:44:46.616595 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/607e4223-7a52-479b-9347-1c5e55698bcc-util" (OuterVolumeSpecName: "util") pod "607e4223-7a52-479b-9347-1c5e55698bcc" (UID: "607e4223-7a52-479b-9347-1c5e55698bcc"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:44:46 crc kubenswrapper[4769]: I0131 16:44:46.686044 4769 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/607e4223-7a52-479b-9347-1c5e55698bcc-util\") on node \"crc\" DevicePath \"\"" Jan 31 16:44:46 crc kubenswrapper[4769]: I0131 16:44:46.686089 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lhsgf\" (UniqueName: \"kubernetes.io/projected/607e4223-7a52-479b-9347-1c5e55698bcc-kube-api-access-lhsgf\") on node \"crc\" DevicePath \"\"" Jan 31 16:44:46 crc kubenswrapper[4769]: I0131 16:44:46.686104 4769 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/607e4223-7a52-479b-9347-1c5e55698bcc-bundle\") on node \"crc\" DevicePath \"\"" Jan 31 16:44:47 crc kubenswrapper[4769]: I0131 16:44:47.162935 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef4fg77" event={"ID":"607e4223-7a52-479b-9347-1c5e55698bcc","Type":"ContainerDied","Data":"d153ae93865ea590409cadedaa1a8f4b6fcd3753b8085fabbf3379894bbe13e6"} Jan 31 16:44:47 crc kubenswrapper[4769]: I0131 16:44:47.162990 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d153ae93865ea590409cadedaa1a8f4b6fcd3753b8085fabbf3379894bbe13e6" Jan 31 16:44:47 crc kubenswrapper[4769]: I0131 16:44:47.163034 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef4fg77" Jan 31 16:44:50 crc kubenswrapper[4769]: I0131 16:44:50.682331 4769 patch_prober.go:28] interesting pod/machine-config-daemon-4bqbm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 31 16:44:50 crc kubenswrapper[4769]: I0131 16:44:50.682885 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 31 16:44:59 crc kubenswrapper[4769]: I0131 16:44:59.249318 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-9fd969457-jfqwn"] Jan 31 16:44:59 crc kubenswrapper[4769]: E0131 16:44:59.251334 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="607e4223-7a52-479b-9347-1c5e55698bcc" containerName="extract" Jan 31 16:44:59 crc kubenswrapper[4769]: I0131 16:44:59.251428 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="607e4223-7a52-479b-9347-1c5e55698bcc" containerName="extract" Jan 31 16:44:59 crc kubenswrapper[4769]: E0131 16:44:59.251544 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="607e4223-7a52-479b-9347-1c5e55698bcc" containerName="pull" Jan 31 16:44:59 crc kubenswrapper[4769]: I0131 16:44:59.251627 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="607e4223-7a52-479b-9347-1c5e55698bcc" containerName="pull" Jan 31 16:44:59 crc kubenswrapper[4769]: E0131 16:44:59.251714 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="607e4223-7a52-479b-9347-1c5e55698bcc" containerName="util" Jan 31 
16:44:59 crc kubenswrapper[4769]: I0131 16:44:59.251784 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="607e4223-7a52-479b-9347-1c5e55698bcc" containerName="util" Jan 31 16:44:59 crc kubenswrapper[4769]: I0131 16:44:59.252001 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="607e4223-7a52-479b-9347-1c5e55698bcc" containerName="extract" Jan 31 16:44:59 crc kubenswrapper[4769]: I0131 16:44:59.252648 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-9fd969457-jfqwn" Jan 31 16:44:59 crc kubenswrapper[4769]: I0131 16:44:59.255416 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-service-cert" Jan 31 16:44:59 crc kubenswrapper[4769]: I0131 16:44:59.255448 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-xphcv" Jan 31 16:44:59 crc kubenswrapper[4769]: I0131 16:44:59.269235 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-9fd969457-jfqwn"] Jan 31 16:44:59 crc kubenswrapper[4769]: I0131 16:44:59.384782 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/1dcc8ec7-f8b6-4f25-84cb-e90affd7ec92-apiservice-cert\") pod \"keystone-operator-controller-manager-9fd969457-jfqwn\" (UID: \"1dcc8ec7-f8b6-4f25-84cb-e90affd7ec92\") " pod="openstack-operators/keystone-operator-controller-manager-9fd969457-jfqwn" Jan 31 16:44:59 crc kubenswrapper[4769]: I0131 16:44:59.384825 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/1dcc8ec7-f8b6-4f25-84cb-e90affd7ec92-webhook-cert\") pod \"keystone-operator-controller-manager-9fd969457-jfqwn\" (UID: \"1dcc8ec7-f8b6-4f25-84cb-e90affd7ec92\") " pod="openstack-operators/keystone-operator-controller-manager-9fd969457-jfqwn" Jan 31 16:44:59 crc kubenswrapper[4769]: I0131 16:44:59.384842 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fc7ns\" (UniqueName: \"kubernetes.io/projected/1dcc8ec7-f8b6-4f25-84cb-e90affd7ec92-kube-api-access-fc7ns\") pod \"keystone-operator-controller-manager-9fd969457-jfqwn\" (UID: \"1dcc8ec7-f8b6-4f25-84cb-e90affd7ec92\") " pod="openstack-operators/keystone-operator-controller-manager-9fd969457-jfqwn" Jan 31 16:44:59 crc kubenswrapper[4769]: I0131 16:44:59.485865 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/1dcc8ec7-f8b6-4f25-84cb-e90affd7ec92-apiservice-cert\") pod \"keystone-operator-controller-manager-9fd969457-jfqwn\" (UID: \"1dcc8ec7-f8b6-4f25-84cb-e90affd7ec92\") " pod="openstack-operators/keystone-operator-controller-manager-9fd969457-jfqwn" Jan 31 16:44:59 crc kubenswrapper[4769]: I0131 16:44:59.485913 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/1dcc8ec7-f8b6-4f25-84cb-e90affd7ec92-webhook-cert\") pod \"keystone-operator-controller-manager-9fd969457-jfqwn\" (UID: \"1dcc8ec7-f8b6-4f25-84cb-e90affd7ec92\") " pod="openstack-operators/keystone-operator-controller-manager-9fd969457-jfqwn" Jan 31 16:44:59 crc kubenswrapper[4769]: I0131 16:44:59.485936 4769 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fc7ns\" (UniqueName: \"kubernetes.io/projected/1dcc8ec7-f8b6-4f25-84cb-e90affd7ec92-kube-api-access-fc7ns\") pod \"keystone-operator-controller-manager-9fd969457-jfqwn\" (UID: \"1dcc8ec7-f8b6-4f25-84cb-e90affd7ec92\") " pod="openstack-operators/keystone-operator-controller-manager-9fd969457-jfqwn" Jan 31 16:44:59 crc kubenswrapper[4769]: I0131 16:44:59.491169 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/1dcc8ec7-f8b6-4f25-84cb-e90affd7ec92-apiservice-cert\") pod \"keystone-operator-controller-manager-9fd969457-jfqwn\" (UID: \"1dcc8ec7-f8b6-4f25-84cb-e90affd7ec92\") " pod="openstack-operators/keystone-operator-controller-manager-9fd969457-jfqwn" Jan 31 16:44:59 crc kubenswrapper[4769]: I0131 16:44:59.499985 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/1dcc8ec7-f8b6-4f25-84cb-e90affd7ec92-webhook-cert\") pod \"keystone-operator-controller-manager-9fd969457-jfqwn\" (UID: \"1dcc8ec7-f8b6-4f25-84cb-e90affd7ec92\") " pod="openstack-operators/keystone-operator-controller-manager-9fd969457-jfqwn" Jan 31 16:44:59 crc kubenswrapper[4769]: I0131 16:44:59.504250 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fc7ns\" (UniqueName: \"kubernetes.io/projected/1dcc8ec7-f8b6-4f25-84cb-e90affd7ec92-kube-api-access-fc7ns\") pod \"keystone-operator-controller-manager-9fd969457-jfqwn\" (UID: \"1dcc8ec7-f8b6-4f25-84cb-e90affd7ec92\") " pod="openstack-operators/keystone-operator-controller-manager-9fd969457-jfqwn" Jan 31 16:44:59 crc kubenswrapper[4769]: I0131 16:44:59.567098 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-9fd969457-jfqwn" Jan 31 16:45:00 crc kubenswrapper[4769]: I0131 16:45:00.029678 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-9fd969457-jfqwn"] Jan 31 16:45:00 crc kubenswrapper[4769]: I0131 16:45:00.165089 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29497965-mhhhl"] Jan 31 16:45:00 crc kubenswrapper[4769]: I0131 16:45:00.166382 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29497965-mhhhl" Jan 31 16:45:00 crc kubenswrapper[4769]: I0131 16:45:00.168828 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 31 16:45:00 crc kubenswrapper[4769]: I0131 16:45:00.169075 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 31 16:45:00 crc kubenswrapper[4769]: I0131 16:45:00.188091 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29497965-mhhhl"] Jan 31 16:45:00 crc kubenswrapper[4769]: I0131 16:45:00.265036 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-9fd969457-jfqwn" event={"ID":"1dcc8ec7-f8b6-4f25-84cb-e90affd7ec92","Type":"ContainerStarted","Data":"7440b3ece196f582b722fd13989cb1ce72fe0c9e9903550f88d5ccaf16e49eb5"} Jan 31 16:45:00 crc kubenswrapper[4769]: I0131 16:45:00.302866 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/dc05825e-5ee7-47d5-862a-03053f618dbe-secret-volume\") pod \"collect-profiles-29497965-mhhhl\" (UID: \"dc05825e-5ee7-47d5-862a-03053f618dbe\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29497965-mhhhl" Jan 31 16:45:00 crc kubenswrapper[4769]: I0131 16:45:00.302926 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/dc05825e-5ee7-47d5-862a-03053f618dbe-config-volume\") pod \"collect-profiles-29497965-mhhhl\" (UID: \"dc05825e-5ee7-47d5-862a-03053f618dbe\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29497965-mhhhl" Jan 31 16:45:00 crc kubenswrapper[4769]: I0131 16:45:00.302969 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-59wxz\" (UniqueName: \"kubernetes.io/projected/dc05825e-5ee7-47d5-862a-03053f618dbe-kube-api-access-59wxz\") pod \"collect-profiles-29497965-mhhhl\" (UID: \"dc05825e-5ee7-47d5-862a-03053f618dbe\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29497965-mhhhl" Jan 31 16:45:00 crc kubenswrapper[4769]: I0131 16:45:00.404574 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/dc05825e-5ee7-47d5-862a-03053f618dbe-config-volume\") pod \"collect-profiles-29497965-mhhhl\" (UID: \"dc05825e-5ee7-47d5-862a-03053f618dbe\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29497965-mhhhl" Jan 31 16:45:00 crc kubenswrapper[4769]: I0131 16:45:00.404697 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-59wxz\" (UniqueName: \"kubernetes.io/projected/dc05825e-5ee7-47d5-862a-03053f618dbe-kube-api-access-59wxz\") pod \"collect-profiles-29497965-mhhhl\" (UID: \"dc05825e-5ee7-47d5-862a-03053f618dbe\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29497965-mhhhl" Jan 31 16:45:00 crc kubenswrapper[4769]: I0131 16:45:00.404779 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/dc05825e-5ee7-47d5-862a-03053f618dbe-secret-volume\") pod \"collect-profiles-29497965-mhhhl\" (UID: 
\"dc05825e-5ee7-47d5-862a-03053f618dbe\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29497965-mhhhl" Jan 31 16:45:00 crc kubenswrapper[4769]: I0131 16:45:00.405757 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/dc05825e-5ee7-47d5-862a-03053f618dbe-config-volume\") pod \"collect-profiles-29497965-mhhhl\" (UID: \"dc05825e-5ee7-47d5-862a-03053f618dbe\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29497965-mhhhl" Jan 31 16:45:00 crc kubenswrapper[4769]: I0131 16:45:00.411870 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/dc05825e-5ee7-47d5-862a-03053f618dbe-secret-volume\") pod \"collect-profiles-29497965-mhhhl\" (UID: \"dc05825e-5ee7-47d5-862a-03053f618dbe\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29497965-mhhhl" Jan 31 16:45:00 crc kubenswrapper[4769]: I0131 16:45:00.432196 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-59wxz\" (UniqueName: \"kubernetes.io/projected/dc05825e-5ee7-47d5-862a-03053f618dbe-kube-api-access-59wxz\") pod \"collect-profiles-29497965-mhhhl\" (UID: \"dc05825e-5ee7-47d5-862a-03053f618dbe\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29497965-mhhhl" Jan 31 16:45:00 crc kubenswrapper[4769]: I0131 16:45:00.495890 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29497965-mhhhl" Jan 31 16:45:00 crc kubenswrapper[4769]: I0131 16:45:00.765539 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29497965-mhhhl"] Jan 31 16:45:01 crc kubenswrapper[4769]: I0131 16:45:01.273283 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29497965-mhhhl" event={"ID":"dc05825e-5ee7-47d5-862a-03053f618dbe","Type":"ContainerStarted","Data":"6ee0f489940234ed079a3efd5a411a6be7b5995f247b0a0a16a46631f4cb7fb5"} Jan 31 16:45:01 crc kubenswrapper[4769]: I0131 16:45:01.273340 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29497965-mhhhl" event={"ID":"dc05825e-5ee7-47d5-862a-03053f618dbe","Type":"ContainerStarted","Data":"c0d8948900811f85c4381717255bb6297c0d8144de9b07077492b6b612389844"} Jan 31 16:45:01 crc kubenswrapper[4769]: I0131 16:45:01.295934 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29497965-mhhhl" podStartSLOduration=1.29590923 podStartE2EDuration="1.29590923s" podCreationTimestamp="2026-01-31 16:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:45:01.288610836 +0000 UTC m=+949.362779515" watchObservedRunningTime="2026-01-31 16:45:01.29590923 +0000 UTC m=+949.370077919" Jan 31 16:45:02 crc kubenswrapper[4769]: I0131 16:45:02.286078 4769 generic.go:334] "Generic (PLEG): container finished" podID="dc05825e-5ee7-47d5-862a-03053f618dbe" containerID="6ee0f489940234ed079a3efd5a411a6be7b5995f247b0a0a16a46631f4cb7fb5" exitCode=0 Jan 31 16:45:02 crc kubenswrapper[4769]: I0131 16:45:02.286130 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29497965-mhhhl" 
event={"ID":"dc05825e-5ee7-47d5-862a-03053f618dbe","Type":"ContainerDied","Data":"6ee0f489940234ed079a3efd5a411a6be7b5995f247b0a0a16a46631f4cb7fb5"} Jan 31 16:45:03 crc kubenswrapper[4769]: I0131 16:45:03.293015 4769 generic.go:334] "Generic (PLEG): container finished" podID="99300305-94b5-426d-a930-a4420fc775d7" containerID="c094a72b66659b1ed5046d32f6e8dbc270fc663565c48a5a4bd3343756c90925" exitCode=0 Jan 31 16:45:03 crc kubenswrapper[4769]: I0131 16:45:03.293109 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/rabbitmq-server-0" event={"ID":"99300305-94b5-426d-a930-a4420fc775d7","Type":"ContainerDied","Data":"c094a72b66659b1ed5046d32f6e8dbc270fc663565c48a5a4bd3343756c90925"} Jan 31 16:45:04 crc kubenswrapper[4769]: I0131 16:45:04.758731 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29497965-mhhhl" Jan 31 16:45:04 crc kubenswrapper[4769]: I0131 16:45:04.869071 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-59wxz\" (UniqueName: \"kubernetes.io/projected/dc05825e-5ee7-47d5-862a-03053f618dbe-kube-api-access-59wxz\") pod \"dc05825e-5ee7-47d5-862a-03053f618dbe\" (UID: \"dc05825e-5ee7-47d5-862a-03053f618dbe\") " Jan 31 16:45:04 crc kubenswrapper[4769]: I0131 16:45:04.869343 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/dc05825e-5ee7-47d5-862a-03053f618dbe-secret-volume\") pod \"dc05825e-5ee7-47d5-862a-03053f618dbe\" (UID: \"dc05825e-5ee7-47d5-862a-03053f618dbe\") " Jan 31 16:45:04 crc kubenswrapper[4769]: I0131 16:45:04.869542 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/dc05825e-5ee7-47d5-862a-03053f618dbe-config-volume\") pod \"dc05825e-5ee7-47d5-862a-03053f618dbe\" (UID: \"dc05825e-5ee7-47d5-862a-03053f618dbe\") " Jan 31 16:45:04 crc kubenswrapper[4769]: I0131 16:45:04.870610 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dc05825e-5ee7-47d5-862a-03053f618dbe-config-volume" (OuterVolumeSpecName: "config-volume") pod "dc05825e-5ee7-47d5-862a-03053f618dbe" (UID: "dc05825e-5ee7-47d5-862a-03053f618dbe"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:45:04 crc kubenswrapper[4769]: I0131 16:45:04.873456 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc05825e-5ee7-47d5-862a-03053f618dbe-kube-api-access-59wxz" (OuterVolumeSpecName: "kube-api-access-59wxz") pod "dc05825e-5ee7-47d5-862a-03053f618dbe" (UID: "dc05825e-5ee7-47d5-862a-03053f618dbe"). InnerVolumeSpecName "kube-api-access-59wxz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:45:04 crc kubenswrapper[4769]: I0131 16:45:04.876984 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc05825e-5ee7-47d5-862a-03053f618dbe-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "dc05825e-5ee7-47d5-862a-03053f618dbe" (UID: "dc05825e-5ee7-47d5-862a-03053f618dbe"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:45:04 crc kubenswrapper[4769]: I0131 16:45:04.971108 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-59wxz\" (UniqueName: \"kubernetes.io/projected/dc05825e-5ee7-47d5-862a-03053f618dbe-kube-api-access-59wxz\") on node \"crc\" DevicePath \"\"" Jan 31 16:45:04 crc kubenswrapper[4769]: I0131 16:45:04.971153 4769 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/dc05825e-5ee7-47d5-862a-03053f618dbe-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 31 16:45:04 crc kubenswrapper[4769]: I0131 16:45:04.971167 4769 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/dc05825e-5ee7-47d5-862a-03053f618dbe-config-volume\") on node \"crc\" DevicePath \"\"" Jan 31 16:45:05 crc kubenswrapper[4769]: I0131 16:45:05.316008 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29497965-mhhhl" event={"ID":"dc05825e-5ee7-47d5-862a-03053f618dbe","Type":"ContainerDied","Data":"c0d8948900811f85c4381717255bb6297c0d8144de9b07077492b6b612389844"} Jan 31 16:45:05 crc kubenswrapper[4769]: I0131 16:45:05.316272 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c0d8948900811f85c4381717255bb6297c0d8144de9b07077492b6b612389844" Jan 31 16:45:05 crc kubenswrapper[4769]: I0131 16:45:05.316324 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29497965-mhhhl" Jan 31 16:45:05 crc kubenswrapper[4769]: I0131 16:45:05.319008 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-9fd969457-jfqwn" event={"ID":"1dcc8ec7-f8b6-4f25-84cb-e90affd7ec92","Type":"ContainerStarted","Data":"44925958cb74eac4605319fccdced0b8e39c60337b1560dfef6197f8869bccce"} Jan 31 16:45:05 crc kubenswrapper[4769]: I0131 16:45:05.319121 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-9fd969457-jfqwn" Jan 31 16:45:05 crc kubenswrapper[4769]: I0131 16:45:05.321651 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/rabbitmq-server-0" event={"ID":"99300305-94b5-426d-a930-a4420fc775d7","Type":"ContainerStarted","Data":"d00e807592648811e290cb14fd2b4ccb81721614591f610e42f3b247bfd31863"} Jan 31 16:45:05 crc kubenswrapper[4769]: I0131 16:45:05.322094 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/rabbitmq-server-0" Jan 31 16:45:05 crc kubenswrapper[4769]: I0131 16:45:05.348478 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-9fd969457-jfqwn" podStartSLOduration=1.62809188 podStartE2EDuration="6.348453475s" podCreationTimestamp="2026-01-31 16:44:59 +0000 UTC" firstStartedPulling="2026-01-31 16:45:00.038098898 +0000 UTC m=+948.112267607" lastFinishedPulling="2026-01-31 16:45:04.758460503 +0000 UTC m=+952.832629202" observedRunningTime="2026-01-31 16:45:05.342024424 +0000 UTC m=+953.416193103" watchObservedRunningTime="2026-01-31 16:45:05.348453475 +0000 UTC m=+953.422622174" Jan 31 16:45:05 crc kubenswrapper[4769]: I0131 16:45:05.796167 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/rabbitmq-server-0" 
podStartSLOduration=37.628813358 podStartE2EDuration="44.796129063s" podCreationTimestamp="2026-01-31 16:44:21 +0000 UTC" firstStartedPulling="2026-01-31 16:44:23.305257715 +0000 UTC m=+911.379426394" lastFinishedPulling="2026-01-31 16:44:30.47257344 +0000 UTC m=+918.546742099" observedRunningTime="2026-01-31 16:45:05.384274441 +0000 UTC m=+953.458443190" watchObservedRunningTime="2026-01-31 16:45:05.796129063 +0000 UTC m=+953.870297732" Jan 31 16:45:09 crc kubenswrapper[4769]: I0131 16:45:09.574097 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-9fd969457-jfqwn" Jan 31 16:45:14 crc kubenswrapper[4769]: I0131 16:45:14.315082 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-index-nrvsg"] Jan 31 16:45:14 crc kubenswrapper[4769]: E0131 16:45:14.316078 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc05825e-5ee7-47d5-862a-03053f618dbe" containerName="collect-profiles" Jan 31 16:45:14 crc kubenswrapper[4769]: I0131 16:45:14.316097 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc05825e-5ee7-47d5-862a-03053f618dbe" containerName="collect-profiles" Jan 31 16:45:14 crc kubenswrapper[4769]: I0131 16:45:14.316243 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc05825e-5ee7-47d5-862a-03053f618dbe" containerName="collect-profiles" Jan 31 16:45:14 crc kubenswrapper[4769]: I0131 16:45:14.316746 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-index-nrvsg" Jan 31 16:45:14 crc kubenswrapper[4769]: I0131 16:45:14.318762 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-index-dockercfg-spl29" Jan 31 16:45:14 crc kubenswrapper[4769]: I0131 16:45:14.322164 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-index-nrvsg"] Jan 31 16:45:14 crc kubenswrapper[4769]: I0131 16:45:14.498804 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bpsw2\" (UniqueName: \"kubernetes.io/projected/3b198919-7d1c-472e-99e9-febd4bc128da-kube-api-access-bpsw2\") pod \"barbican-operator-index-nrvsg\" (UID: \"3b198919-7d1c-472e-99e9-febd4bc128da\") " pod="openstack-operators/barbican-operator-index-nrvsg" Jan 31 16:45:14 crc kubenswrapper[4769]: I0131 16:45:14.599884 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bpsw2\" (UniqueName: \"kubernetes.io/projected/3b198919-7d1c-472e-99e9-febd4bc128da-kube-api-access-bpsw2\") pod \"barbican-operator-index-nrvsg\" (UID: \"3b198919-7d1c-472e-99e9-febd4bc128da\") " pod="openstack-operators/barbican-operator-index-nrvsg" Jan 31 16:45:14 crc kubenswrapper[4769]: I0131 16:45:14.617263 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bpsw2\" (UniqueName: \"kubernetes.io/projected/3b198919-7d1c-472e-99e9-febd4bc128da-kube-api-access-bpsw2\") pod \"barbican-operator-index-nrvsg\" (UID: \"3b198919-7d1c-472e-99e9-febd4bc128da\") " pod="openstack-operators/barbican-operator-index-nrvsg" Jan 31 16:45:14 crc kubenswrapper[4769]: I0131 16:45:14.640205 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/barbican-operator-index-nrvsg" Jan 31 16:45:15 crc kubenswrapper[4769]: I0131 16:45:15.085216 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-index-nrvsg"] Jan 31 16:45:15 crc kubenswrapper[4769]: W0131 16:45:15.088649 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b198919_7d1c_472e_99e9_febd4bc128da.slice/crio-1d5e8150ddb41e4d5343972327212b11137ea28e32da55477eb7240c24e89513 WatchSource:0}: Error finding container 1d5e8150ddb41e4d5343972327212b11137ea28e32da55477eb7240c24e89513: Status 404 returned error can't find the container with id 1d5e8150ddb41e4d5343972327212b11137ea28e32da55477eb7240c24e89513 Jan 31 16:45:15 crc kubenswrapper[4769]: I0131 16:45:15.387322 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-index-nrvsg" event={"ID":"3b198919-7d1c-472e-99e9-febd4bc128da","Type":"ContainerStarted","Data":"1d5e8150ddb41e4d5343972327212b11137ea28e32da55477eb7240c24e89513"} Jan 31 16:45:16 crc kubenswrapper[4769]: I0131 16:45:16.402574 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-index-nrvsg" event={"ID":"3b198919-7d1c-472e-99e9-febd4bc128da","Type":"ContainerStarted","Data":"82ea3a47422e9d0cc43537d3cf3344ca51cd29e6a07711d7cc17f19059143164"} Jan 31 16:45:16 crc kubenswrapper[4769]: I0131 16:45:16.425617 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-index-nrvsg" podStartSLOduration=1.542963254 podStartE2EDuration="2.4255946s" podCreationTimestamp="2026-01-31 16:45:14 +0000 UTC" firstStartedPulling="2026-01-31 16:45:15.091247118 +0000 UTC m=+963.165415807" lastFinishedPulling="2026-01-31 16:45:15.973878474 +0000 UTC m=+964.048047153" observedRunningTime="2026-01-31 16:45:16.422806115 +0000 UTC m=+964.496974814" watchObservedRunningTime="2026-01-31 16:45:16.4255946 +0000 UTC m=+964.499763289" Jan 31 16:45:19 crc kubenswrapper[4769]: I0131 16:45:19.513346 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/barbican-operator-index-nrvsg"] Jan 31 16:45:19 crc kubenswrapper[4769]: I0131 16:45:19.513955 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/barbican-operator-index-nrvsg" podUID="3b198919-7d1c-472e-99e9-febd4bc128da" containerName="registry-server" containerID="cri-o://82ea3a47422e9d0cc43537d3cf3344ca51cd29e6a07711d7cc17f19059143164" gracePeriod=2 Jan 31 16:45:19 crc kubenswrapper[4769]: I0131 16:45:19.980325 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/barbican-operator-index-nrvsg" Jan 31 16:45:20 crc kubenswrapper[4769]: I0131 16:45:20.127743 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-index-z227n"] Jan 31 16:45:20 crc kubenswrapper[4769]: E0131 16:45:20.128264 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b198919-7d1c-472e-99e9-febd4bc128da" containerName="registry-server" Jan 31 16:45:20 crc kubenswrapper[4769]: I0131 16:45:20.128308 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b198919-7d1c-472e-99e9-febd4bc128da" containerName="registry-server" Jan 31 16:45:20 crc kubenswrapper[4769]: I0131 16:45:20.128697 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="3b198919-7d1c-472e-99e9-febd4bc128da" containerName="registry-server" Jan 31 16:45:20 crc kubenswrapper[4769]: I0131 16:45:20.129720 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-index-z227n" Jan 31 16:45:20 crc kubenswrapper[4769]: I0131 16:45:20.151428 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-index-z227n"] Jan 31 16:45:20 crc kubenswrapper[4769]: I0131 16:45:20.178291 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bpsw2\" (UniqueName: \"kubernetes.io/projected/3b198919-7d1c-472e-99e9-febd4bc128da-kube-api-access-bpsw2\") pod \"3b198919-7d1c-472e-99e9-febd4bc128da\" (UID: \"3b198919-7d1c-472e-99e9-febd4bc128da\") " Jan 31 16:45:20 crc kubenswrapper[4769]: I0131 16:45:20.179035 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fwd5n\" (UniqueName: \"kubernetes.io/projected/85c08709-5d57-4967-a160-3a773cffe1c1-kube-api-access-fwd5n\") pod \"barbican-operator-index-z227n\" (UID: \"85c08709-5d57-4967-a160-3a773cffe1c1\") " pod="openstack-operators/barbican-operator-index-z227n" Jan 31 16:45:20 crc kubenswrapper[4769]: I0131 16:45:20.189875 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3b198919-7d1c-472e-99e9-febd4bc128da-kube-api-access-bpsw2" (OuterVolumeSpecName: "kube-api-access-bpsw2") pod "3b198919-7d1c-472e-99e9-febd4bc128da" (UID: "3b198919-7d1c-472e-99e9-febd4bc128da"). InnerVolumeSpecName "kube-api-access-bpsw2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:45:20 crc kubenswrapper[4769]: I0131 16:45:20.280397 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fwd5n\" (UniqueName: \"kubernetes.io/projected/85c08709-5d57-4967-a160-3a773cffe1c1-kube-api-access-fwd5n\") pod \"barbican-operator-index-z227n\" (UID: \"85c08709-5d57-4967-a160-3a773cffe1c1\") " pod="openstack-operators/barbican-operator-index-z227n" Jan 31 16:45:20 crc kubenswrapper[4769]: I0131 16:45:20.280652 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bpsw2\" (UniqueName: \"kubernetes.io/projected/3b198919-7d1c-472e-99e9-febd4bc128da-kube-api-access-bpsw2\") on node \"crc\" DevicePath \"\"" Jan 31 16:45:20 crc kubenswrapper[4769]: I0131 16:45:20.302758 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fwd5n\" (UniqueName: \"kubernetes.io/projected/85c08709-5d57-4967-a160-3a773cffe1c1-kube-api-access-fwd5n\") pod \"barbican-operator-index-z227n\" (UID: \"85c08709-5d57-4967-a160-3a773cffe1c1\") " pod="openstack-operators/barbican-operator-index-z227n" Jan 31 16:45:20 crc kubenswrapper[4769]: I0131 16:45:20.438016 4769 generic.go:334] "Generic (PLEG): container finished" podID="3b198919-7d1c-472e-99e9-febd4bc128da" containerID="82ea3a47422e9d0cc43537d3cf3344ca51cd29e6a07711d7cc17f19059143164" exitCode=0 Jan 31 16:45:20 crc kubenswrapper[4769]: I0131 16:45:20.438357 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-index-nrvsg" event={"ID":"3b198919-7d1c-472e-99e9-febd4bc128da","Type":"ContainerDied","Data":"82ea3a47422e9d0cc43537d3cf3344ca51cd29e6a07711d7cc17f19059143164"} Jan 31 16:45:20 crc kubenswrapper[4769]: I0131 16:45:20.438562 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-index-nrvsg" event={"ID":"3b198919-7d1c-472e-99e9-febd4bc128da","Type":"ContainerDied","Data":"1d5e8150ddb41e4d5343972327212b11137ea28e32da55477eb7240c24e89513"} Jan 31 16:45:20 crc kubenswrapper[4769]: I0131 16:45:20.438720 4769 scope.go:117] "RemoveContainer" containerID="82ea3a47422e9d0cc43537d3cf3344ca51cd29e6a07711d7cc17f19059143164" Jan 31 16:45:20 crc kubenswrapper[4769]: I0131 16:45:20.438782 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-index-nrvsg" Jan 31 16:45:20 crc kubenswrapper[4769]: I0131 16:45:20.456321 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/barbican-operator-index-z227n" Jan 31 16:45:20 crc kubenswrapper[4769]: I0131 16:45:20.463954 4769 scope.go:117] "RemoveContainer" containerID="82ea3a47422e9d0cc43537d3cf3344ca51cd29e6a07711d7cc17f19059143164" Jan 31 16:45:20 crc kubenswrapper[4769]: E0131 16:45:20.464668 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"82ea3a47422e9d0cc43537d3cf3344ca51cd29e6a07711d7cc17f19059143164\": container with ID starting with 82ea3a47422e9d0cc43537d3cf3344ca51cd29e6a07711d7cc17f19059143164 not found: ID does not exist" containerID="82ea3a47422e9d0cc43537d3cf3344ca51cd29e6a07711d7cc17f19059143164" Jan 31 16:45:20 crc kubenswrapper[4769]: I0131 16:45:20.464729 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"82ea3a47422e9d0cc43537d3cf3344ca51cd29e6a07711d7cc17f19059143164"} err="failed to get container status \"82ea3a47422e9d0cc43537d3cf3344ca51cd29e6a07711d7cc17f19059143164\": rpc error: code = NotFound desc = could not find container \"82ea3a47422e9d0cc43537d3cf3344ca51cd29e6a07711d7cc17f19059143164\": container with ID starting with 82ea3a47422e9d0cc43537d3cf3344ca51cd29e6a07711d7cc17f19059143164 not found: ID does not exist" Jan 31 16:45:20 crc kubenswrapper[4769]: I0131 16:45:20.496166 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/barbican-operator-index-nrvsg"] Jan 31 16:45:20 crc kubenswrapper[4769]: I0131 16:45:20.507774 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/barbican-operator-index-nrvsg"] Jan 31 16:45:20 crc kubenswrapper[4769]: I0131 16:45:20.682077 4769 patch_prober.go:28] interesting pod/machine-config-daemon-4bqbm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 31 16:45:20 crc kubenswrapper[4769]: I0131 16:45:20.682487 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 31 16:45:20 crc kubenswrapper[4769]: I0131 16:45:20.682574 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" Jan 31 16:45:20 crc kubenswrapper[4769]: I0131 16:45:20.683415 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"54d76b4d5009a4a563cf1e37ee7df5b71a49cb0937af68d18db56c67eb23639a"} pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 31 16:45:20 crc kubenswrapper[4769]: I0131 16:45:20.683606 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" containerID="cri-o://54d76b4d5009a4a563cf1e37ee7df5b71a49cb0937af68d18db56c67eb23639a" gracePeriod=600 Jan 31 16:45:20 crc kubenswrapper[4769]: I0131 16:45:20.726197 4769 kubelet_volumes.go:163] "Cleaned up 
orphaned pod volumes dir" podUID="3b198919-7d1c-472e-99e9-febd4bc128da" path="/var/lib/kubelet/pods/3b198919-7d1c-472e-99e9-febd4bc128da/volumes" Jan 31 16:45:20 crc kubenswrapper[4769]: I0131 16:45:20.903108 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-index-z227n"] Jan 31 16:45:20 crc kubenswrapper[4769]: W0131 16:45:20.911878 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod85c08709_5d57_4967_a160_3a773cffe1c1.slice/crio-5791a5903bf2a2f378261aeb8472da9a974295c72deb4a627c3e141ef00f1356 WatchSource:0}: Error finding container 5791a5903bf2a2f378261aeb8472da9a974295c72deb4a627c3e141ef00f1356: Status 404 returned error can't find the container with id 5791a5903bf2a2f378261aeb8472da9a974295c72deb4a627c3e141ef00f1356 Jan 31 16:45:20 crc kubenswrapper[4769]: I0131 16:45:20.915968 4769 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 31 16:45:21 crc kubenswrapper[4769]: I0131 16:45:21.448851 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-index-z227n" event={"ID":"85c08709-5d57-4967-a160-3a773cffe1c1","Type":"ContainerStarted","Data":"5791a5903bf2a2f378261aeb8472da9a974295c72deb4a627c3e141ef00f1356"} Jan 31 16:45:21 crc kubenswrapper[4769]: I0131 16:45:21.454948 4769 generic.go:334] "Generic (PLEG): container finished" podID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerID="54d76b4d5009a4a563cf1e37ee7df5b71a49cb0937af68d18db56c67eb23639a" exitCode=0 Jan 31 16:45:21 crc kubenswrapper[4769]: I0131 16:45:21.454994 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" event={"ID":"1d352f75-43f7-4b8c-867e-cfb17bbbe011","Type":"ContainerDied","Data":"54d76b4d5009a4a563cf1e37ee7df5b71a49cb0937af68d18db56c67eb23639a"} Jan 31 16:45:21 crc kubenswrapper[4769]: I0131 16:45:21.455024 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" event={"ID":"1d352f75-43f7-4b8c-867e-cfb17bbbe011","Type":"ContainerStarted","Data":"2affff92918addd5ee0e3565d5ea4c6af01f170b7bf40a5a6a676c61598fac76"} Jan 31 16:45:21 crc kubenswrapper[4769]: I0131 16:45:21.455043 4769 scope.go:117] "RemoveContainer" containerID="468aac8c3c2e831dfe213619a8cbfe7284a5104d05804071dc210d52a0e5d3d0" Jan 31 16:45:22 crc kubenswrapper[4769]: I0131 16:45:22.474136 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-index-z227n" event={"ID":"85c08709-5d57-4967-a160-3a773cffe1c1","Type":"ContainerStarted","Data":"7b7e74521a66c13bcd3d8b116fba41e2c55d88772db52582ee4c6ae45383ac4b"} Jan 31 16:45:22 crc kubenswrapper[4769]: I0131 16:45:22.501771 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-index-z227n" podStartSLOduration=2.071918946 podStartE2EDuration="2.501739046s" podCreationTimestamp="2026-01-31 16:45:20 +0000 UTC" firstStartedPulling="2026-01-31 16:45:20.915706714 +0000 UTC m=+968.989875383" lastFinishedPulling="2026-01-31 16:45:21.345526824 +0000 UTC m=+969.419695483" observedRunningTime="2026-01-31 16:45:22.497312638 +0000 UTC m=+970.571481347" watchObservedRunningTime="2026-01-31 16:45:22.501739046 +0000 UTC m=+970.575907765" Jan 31 16:45:23 crc kubenswrapper[4769]: I0131 16:45:23.018706 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="swift-kuttl-tests/rabbitmq-server-0" Jan 31 16:45:30 crc kubenswrapper[4769]: I0131 16:45:30.458301 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-index-z227n" Jan 31 16:45:30 crc kubenswrapper[4769]: I0131 16:45:30.459059 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/barbican-operator-index-z227n" Jan 31 16:45:30 crc kubenswrapper[4769]: I0131 16:45:30.506943 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/barbican-operator-index-z227n" Jan 31 16:45:30 crc kubenswrapper[4769]: I0131 16:45:30.577861 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-index-z227n" Jan 31 16:45:32 crc kubenswrapper[4769]: I0131 16:45:32.108957 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/keystone-db-create-k4978"] Jan 31 16:45:32 crc kubenswrapper[4769]: I0131 16:45:32.109911 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/keystone-db-create-k4978" Jan 31 16:45:32 crc kubenswrapper[4769]: I0131 16:45:32.128252 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/keystone-db-create-k4978"] Jan 31 16:45:32 crc kubenswrapper[4769]: I0131 16:45:32.135568 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/keystone-dc33-account-create-update-mlwkh"] Jan 31 16:45:32 crc kubenswrapper[4769]: I0131 16:45:32.137053 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/keystone-dc33-account-create-update-mlwkh" Jan 31 16:45:32 crc kubenswrapper[4769]: I0131 16:45:32.151941 4769 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"keystone-db-secret" Jan 31 16:45:32 crc kubenswrapper[4769]: I0131 16:45:32.161880 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/keystone-dc33-account-create-update-mlwkh"] Jan 31 16:45:32 crc kubenswrapper[4769]: I0131 16:45:32.186820 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5hqfx\" (UniqueName: \"kubernetes.io/projected/b38780b5-a260-49f0-853b-5532643cc9c7-kube-api-access-5hqfx\") pod \"keystone-db-create-k4978\" (UID: \"b38780b5-a260-49f0-853b-5532643cc9c7\") " pod="swift-kuttl-tests/keystone-db-create-k4978" Jan 31 16:45:32 crc kubenswrapper[4769]: I0131 16:45:32.186950 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b38780b5-a260-49f0-853b-5532643cc9c7-operator-scripts\") pod \"keystone-db-create-k4978\" (UID: \"b38780b5-a260-49f0-853b-5532643cc9c7\") " pod="swift-kuttl-tests/keystone-db-create-k4978" Jan 31 16:45:32 crc kubenswrapper[4769]: I0131 16:45:32.288366 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6xkvm\" (UniqueName: \"kubernetes.io/projected/7dbac680-3b56-4116-8a9c-e41655dce740-kube-api-access-6xkvm\") pod \"keystone-dc33-account-create-update-mlwkh\" (UID: \"7dbac680-3b56-4116-8a9c-e41655dce740\") " pod="swift-kuttl-tests/keystone-dc33-account-create-update-mlwkh" Jan 31 16:45:32 crc kubenswrapper[4769]: I0131 16:45:32.288486 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" 
(UniqueName: \"kubernetes.io/configmap/7dbac680-3b56-4116-8a9c-e41655dce740-operator-scripts\") pod \"keystone-dc33-account-create-update-mlwkh\" (UID: \"7dbac680-3b56-4116-8a9c-e41655dce740\") " pod="swift-kuttl-tests/keystone-dc33-account-create-update-mlwkh" Jan 31 16:45:32 crc kubenswrapper[4769]: I0131 16:45:32.288568 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b38780b5-a260-49f0-853b-5532643cc9c7-operator-scripts\") pod \"keystone-db-create-k4978\" (UID: \"b38780b5-a260-49f0-853b-5532643cc9c7\") " pod="swift-kuttl-tests/keystone-db-create-k4978" Jan 31 16:45:32 crc kubenswrapper[4769]: I0131 16:45:32.288737 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5hqfx\" (UniqueName: \"kubernetes.io/projected/b38780b5-a260-49f0-853b-5532643cc9c7-kube-api-access-5hqfx\") pod \"keystone-db-create-k4978\" (UID: \"b38780b5-a260-49f0-853b-5532643cc9c7\") " pod="swift-kuttl-tests/keystone-db-create-k4978" Jan 31 16:45:32 crc kubenswrapper[4769]: I0131 16:45:32.289385 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b38780b5-a260-49f0-853b-5532643cc9c7-operator-scripts\") pod \"keystone-db-create-k4978\" (UID: \"b38780b5-a260-49f0-853b-5532643cc9c7\") " pod="swift-kuttl-tests/keystone-db-create-k4978" Jan 31 16:45:32 crc kubenswrapper[4769]: I0131 16:45:32.306958 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5hqfx\" (UniqueName: \"kubernetes.io/projected/b38780b5-a260-49f0-853b-5532643cc9c7-kube-api-access-5hqfx\") pod \"keystone-db-create-k4978\" (UID: \"b38780b5-a260-49f0-853b-5532643cc9c7\") " pod="swift-kuttl-tests/keystone-db-create-k4978" Jan 31 16:45:32 crc kubenswrapper[4769]: I0131 16:45:32.390594 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6xkvm\" (UniqueName: \"kubernetes.io/projected/7dbac680-3b56-4116-8a9c-e41655dce740-kube-api-access-6xkvm\") pod \"keystone-dc33-account-create-update-mlwkh\" (UID: \"7dbac680-3b56-4116-8a9c-e41655dce740\") " pod="swift-kuttl-tests/keystone-dc33-account-create-update-mlwkh" Jan 31 16:45:32 crc kubenswrapper[4769]: I0131 16:45:32.390696 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7dbac680-3b56-4116-8a9c-e41655dce740-operator-scripts\") pod \"keystone-dc33-account-create-update-mlwkh\" (UID: \"7dbac680-3b56-4116-8a9c-e41655dce740\") " pod="swift-kuttl-tests/keystone-dc33-account-create-update-mlwkh" Jan 31 16:45:32 crc kubenswrapper[4769]: I0131 16:45:32.391668 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7dbac680-3b56-4116-8a9c-e41655dce740-operator-scripts\") pod \"keystone-dc33-account-create-update-mlwkh\" (UID: \"7dbac680-3b56-4116-8a9c-e41655dce740\") " pod="swift-kuttl-tests/keystone-dc33-account-create-update-mlwkh" Jan 31 16:45:32 crc kubenswrapper[4769]: I0131 16:45:32.409880 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6xkvm\" (UniqueName: \"kubernetes.io/projected/7dbac680-3b56-4116-8a9c-e41655dce740-kube-api-access-6xkvm\") pod \"keystone-dc33-account-create-update-mlwkh\" (UID: \"7dbac680-3b56-4116-8a9c-e41655dce740\") " pod="swift-kuttl-tests/keystone-dc33-account-create-update-mlwkh" Jan 
31 16:45:32 crc kubenswrapper[4769]: I0131 16:45:32.463864 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/keystone-db-create-k4978" Jan 31 16:45:32 crc kubenswrapper[4769]: I0131 16:45:32.474888 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/keystone-dc33-account-create-update-mlwkh" Jan 31 16:45:32 crc kubenswrapper[4769]: I0131 16:45:32.896343 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/keystone-dc33-account-create-update-mlwkh"] Jan 31 16:45:33 crc kubenswrapper[4769]: I0131 16:45:33.005807 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/keystone-db-create-k4978"] Jan 31 16:45:33 crc kubenswrapper[4769]: W0131 16:45:33.011865 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb38780b5_a260_49f0_853b_5532643cc9c7.slice/crio-0e67fd82f56da50fbced0856ce64d9ae182726e14c7f1e80e17b109e78de1ef6 WatchSource:0}: Error finding container 0e67fd82f56da50fbced0856ce64d9ae182726e14c7f1e80e17b109e78de1ef6: Status 404 returned error can't find the container with id 0e67fd82f56da50fbced0856ce64d9ae182726e14c7f1e80e17b109e78de1ef6 Jan 31 16:45:33 crc kubenswrapper[4769]: I0131 16:45:33.149778 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/55c7a49163ba348c10e2be21119f4ca8799dffa34873699cfe8f8b6d7bnwqd7"] Jan 31 16:45:33 crc kubenswrapper[4769]: I0131 16:45:33.150911 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/55c7a49163ba348c10e2be21119f4ca8799dffa34873699cfe8f8b6d7bnwqd7" Jan 31 16:45:33 crc kubenswrapper[4769]: I0131 16:45:33.153944 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-x6tzc" Jan 31 16:45:33 crc kubenswrapper[4769]: I0131 16:45:33.165169 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/55c7a49163ba348c10e2be21119f4ca8799dffa34873699cfe8f8b6d7bnwqd7"] Jan 31 16:45:33 crc kubenswrapper[4769]: I0131 16:45:33.303711 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e0b17231-5c20-443c-8fc8-6099f8d88e96-bundle\") pod \"55c7a49163ba348c10e2be21119f4ca8799dffa34873699cfe8f8b6d7bnwqd7\" (UID: \"e0b17231-5c20-443c-8fc8-6099f8d88e96\") " pod="openstack-operators/55c7a49163ba348c10e2be21119f4ca8799dffa34873699cfe8f8b6d7bnwqd7" Jan 31 16:45:33 crc kubenswrapper[4769]: I0131 16:45:33.304054 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d88w2\" (UniqueName: \"kubernetes.io/projected/e0b17231-5c20-443c-8fc8-6099f8d88e96-kube-api-access-d88w2\") pod \"55c7a49163ba348c10e2be21119f4ca8799dffa34873699cfe8f8b6d7bnwqd7\" (UID: \"e0b17231-5c20-443c-8fc8-6099f8d88e96\") " pod="openstack-operators/55c7a49163ba348c10e2be21119f4ca8799dffa34873699cfe8f8b6d7bnwqd7" Jan 31 16:45:33 crc kubenswrapper[4769]: I0131 16:45:33.304133 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e0b17231-5c20-443c-8fc8-6099f8d88e96-util\") pod \"55c7a49163ba348c10e2be21119f4ca8799dffa34873699cfe8f8b6d7bnwqd7\" (UID: \"e0b17231-5c20-443c-8fc8-6099f8d88e96\") " pod="openstack-operators/55c7a49163ba348c10e2be21119f4ca8799dffa34873699cfe8f8b6d7bnwqd7" Jan 31 
16:45:33 crc kubenswrapper[4769]: I0131 16:45:33.405082 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e0b17231-5c20-443c-8fc8-6099f8d88e96-util\") pod \"55c7a49163ba348c10e2be21119f4ca8799dffa34873699cfe8f8b6d7bnwqd7\" (UID: \"e0b17231-5c20-443c-8fc8-6099f8d88e96\") " pod="openstack-operators/55c7a49163ba348c10e2be21119f4ca8799dffa34873699cfe8f8b6d7bnwqd7" Jan 31 16:45:33 crc kubenswrapper[4769]: I0131 16:45:33.405184 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e0b17231-5c20-443c-8fc8-6099f8d88e96-bundle\") pod \"55c7a49163ba348c10e2be21119f4ca8799dffa34873699cfe8f8b6d7bnwqd7\" (UID: \"e0b17231-5c20-443c-8fc8-6099f8d88e96\") " pod="openstack-operators/55c7a49163ba348c10e2be21119f4ca8799dffa34873699cfe8f8b6d7bnwqd7" Jan 31 16:45:33 crc kubenswrapper[4769]: I0131 16:45:33.405247 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d88w2\" (UniqueName: \"kubernetes.io/projected/e0b17231-5c20-443c-8fc8-6099f8d88e96-kube-api-access-d88w2\") pod \"55c7a49163ba348c10e2be21119f4ca8799dffa34873699cfe8f8b6d7bnwqd7\" (UID: \"e0b17231-5c20-443c-8fc8-6099f8d88e96\") " pod="openstack-operators/55c7a49163ba348c10e2be21119f4ca8799dffa34873699cfe8f8b6d7bnwqd7" Jan 31 16:45:33 crc kubenswrapper[4769]: I0131 16:45:33.405817 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e0b17231-5c20-443c-8fc8-6099f8d88e96-util\") pod \"55c7a49163ba348c10e2be21119f4ca8799dffa34873699cfe8f8b6d7bnwqd7\" (UID: \"e0b17231-5c20-443c-8fc8-6099f8d88e96\") " pod="openstack-operators/55c7a49163ba348c10e2be21119f4ca8799dffa34873699cfe8f8b6d7bnwqd7" Jan 31 16:45:33 crc kubenswrapper[4769]: I0131 16:45:33.405861 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e0b17231-5c20-443c-8fc8-6099f8d88e96-bundle\") pod \"55c7a49163ba348c10e2be21119f4ca8799dffa34873699cfe8f8b6d7bnwqd7\" (UID: \"e0b17231-5c20-443c-8fc8-6099f8d88e96\") " pod="openstack-operators/55c7a49163ba348c10e2be21119f4ca8799dffa34873699cfe8f8b6d7bnwqd7" Jan 31 16:45:33 crc kubenswrapper[4769]: I0131 16:45:33.424694 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d88w2\" (UniqueName: \"kubernetes.io/projected/e0b17231-5c20-443c-8fc8-6099f8d88e96-kube-api-access-d88w2\") pod \"55c7a49163ba348c10e2be21119f4ca8799dffa34873699cfe8f8b6d7bnwqd7\" (UID: \"e0b17231-5c20-443c-8fc8-6099f8d88e96\") " pod="openstack-operators/55c7a49163ba348c10e2be21119f4ca8799dffa34873699cfe8f8b6d7bnwqd7" Jan 31 16:45:33 crc kubenswrapper[4769]: I0131 16:45:33.463612 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/55c7a49163ba348c10e2be21119f4ca8799dffa34873699cfe8f8b6d7bnwqd7" Jan 31 16:45:33 crc kubenswrapper[4769]: I0131 16:45:33.600900 4769 generic.go:334] "Generic (PLEG): container finished" podID="b38780b5-a260-49f0-853b-5532643cc9c7" containerID="6c49a998918c78603f4614b178c3806f4560fda87e05a1e331e4a5cc280b57f4" exitCode=0 Jan 31 16:45:33 crc kubenswrapper[4769]: I0131 16:45:33.600988 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/keystone-db-create-k4978" event={"ID":"b38780b5-a260-49f0-853b-5532643cc9c7","Type":"ContainerDied","Data":"6c49a998918c78603f4614b178c3806f4560fda87e05a1e331e4a5cc280b57f4"} Jan 31 16:45:33 crc kubenswrapper[4769]: I0131 16:45:33.601014 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/keystone-db-create-k4978" event={"ID":"b38780b5-a260-49f0-853b-5532643cc9c7","Type":"ContainerStarted","Data":"0e67fd82f56da50fbced0856ce64d9ae182726e14c7f1e80e17b109e78de1ef6"} Jan 31 16:45:33 crc kubenswrapper[4769]: I0131 16:45:33.603152 4769 generic.go:334] "Generic (PLEG): container finished" podID="7dbac680-3b56-4116-8a9c-e41655dce740" containerID="7946243402cacce4c24d3c3cc781001e1c62bbb1b8f12f155cbdcfcd3855263b" exitCode=0 Jan 31 16:45:33 crc kubenswrapper[4769]: I0131 16:45:33.603180 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/keystone-dc33-account-create-update-mlwkh" event={"ID":"7dbac680-3b56-4116-8a9c-e41655dce740","Type":"ContainerDied","Data":"7946243402cacce4c24d3c3cc781001e1c62bbb1b8f12f155cbdcfcd3855263b"} Jan 31 16:45:33 crc kubenswrapper[4769]: I0131 16:45:33.603196 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/keystone-dc33-account-create-update-mlwkh" event={"ID":"7dbac680-3b56-4116-8a9c-e41655dce740","Type":"ContainerStarted","Data":"4502cb8a6720b07d21140f4d4073b8fce32af6436b9d375bd143ab6f3abaedc0"} Jan 31 16:45:33 crc kubenswrapper[4769]: I0131 16:45:33.907336 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/55c7a49163ba348c10e2be21119f4ca8799dffa34873699cfe8f8b6d7bnwqd7"] Jan 31 16:45:34 crc kubenswrapper[4769]: I0131 16:45:34.615788 4769 generic.go:334] "Generic (PLEG): container finished" podID="e0b17231-5c20-443c-8fc8-6099f8d88e96" containerID="a1cce602c882e157962118692e1f493845e08562e2b501d6020a220c0da9e9eb" exitCode=0 Jan 31 16:45:34 crc kubenswrapper[4769]: I0131 16:45:34.615909 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/55c7a49163ba348c10e2be21119f4ca8799dffa34873699cfe8f8b6d7bnwqd7" event={"ID":"e0b17231-5c20-443c-8fc8-6099f8d88e96","Type":"ContainerDied","Data":"a1cce602c882e157962118692e1f493845e08562e2b501d6020a220c0da9e9eb"} Jan 31 16:45:34 crc kubenswrapper[4769]: I0131 16:45:34.616627 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/55c7a49163ba348c10e2be21119f4ca8799dffa34873699cfe8f8b6d7bnwqd7" event={"ID":"e0b17231-5c20-443c-8fc8-6099f8d88e96","Type":"ContainerStarted","Data":"9c37cd2c3f47001ba5294bc4e36de40d3d64f4c7db10a045c44e4959a62e92ea"} Jan 31 16:45:35 crc kubenswrapper[4769]: I0131 16:45:35.044758 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/keystone-db-create-k4978" Jan 31 16:45:35 crc kubenswrapper[4769]: I0131 16:45:35.061784 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/keystone-dc33-account-create-update-mlwkh" Jan 31 16:45:35 crc kubenswrapper[4769]: I0131 16:45:35.151508 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5hqfx\" (UniqueName: \"kubernetes.io/projected/b38780b5-a260-49f0-853b-5532643cc9c7-kube-api-access-5hqfx\") pod \"b38780b5-a260-49f0-853b-5532643cc9c7\" (UID: \"b38780b5-a260-49f0-853b-5532643cc9c7\") " Jan 31 16:45:35 crc kubenswrapper[4769]: I0131 16:45:35.151554 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6xkvm\" (UniqueName: \"kubernetes.io/projected/7dbac680-3b56-4116-8a9c-e41655dce740-kube-api-access-6xkvm\") pod \"7dbac680-3b56-4116-8a9c-e41655dce740\" (UID: \"7dbac680-3b56-4116-8a9c-e41655dce740\") " Jan 31 16:45:35 crc kubenswrapper[4769]: I0131 16:45:35.151613 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7dbac680-3b56-4116-8a9c-e41655dce740-operator-scripts\") pod \"7dbac680-3b56-4116-8a9c-e41655dce740\" (UID: \"7dbac680-3b56-4116-8a9c-e41655dce740\") " Jan 31 16:45:35 crc kubenswrapper[4769]: I0131 16:45:35.151651 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b38780b5-a260-49f0-853b-5532643cc9c7-operator-scripts\") pod \"b38780b5-a260-49f0-853b-5532643cc9c7\" (UID: \"b38780b5-a260-49f0-853b-5532643cc9c7\") " Jan 31 16:45:35 crc kubenswrapper[4769]: I0131 16:45:35.152326 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7dbac680-3b56-4116-8a9c-e41655dce740-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7dbac680-3b56-4116-8a9c-e41655dce740" (UID: "7dbac680-3b56-4116-8a9c-e41655dce740"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:45:35 crc kubenswrapper[4769]: I0131 16:45:35.152356 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b38780b5-a260-49f0-853b-5532643cc9c7-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b38780b5-a260-49f0-853b-5532643cc9c7" (UID: "b38780b5-a260-49f0-853b-5532643cc9c7"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:45:35 crc kubenswrapper[4769]: I0131 16:45:35.157860 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7dbac680-3b56-4116-8a9c-e41655dce740-kube-api-access-6xkvm" (OuterVolumeSpecName: "kube-api-access-6xkvm") pod "7dbac680-3b56-4116-8a9c-e41655dce740" (UID: "7dbac680-3b56-4116-8a9c-e41655dce740"). InnerVolumeSpecName "kube-api-access-6xkvm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:45:35 crc kubenswrapper[4769]: I0131 16:45:35.158711 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b38780b5-a260-49f0-853b-5532643cc9c7-kube-api-access-5hqfx" (OuterVolumeSpecName: "kube-api-access-5hqfx") pod "b38780b5-a260-49f0-853b-5532643cc9c7" (UID: "b38780b5-a260-49f0-853b-5532643cc9c7"). InnerVolumeSpecName "kube-api-access-5hqfx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:45:35 crc kubenswrapper[4769]: I0131 16:45:35.253948 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6xkvm\" (UniqueName: \"kubernetes.io/projected/7dbac680-3b56-4116-8a9c-e41655dce740-kube-api-access-6xkvm\") on node \"crc\" DevicePath \"\"" Jan 31 16:45:35 crc kubenswrapper[4769]: I0131 16:45:35.253988 4769 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7dbac680-3b56-4116-8a9c-e41655dce740-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 31 16:45:35 crc kubenswrapper[4769]: I0131 16:45:35.253997 4769 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b38780b5-a260-49f0-853b-5532643cc9c7-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 31 16:45:35 crc kubenswrapper[4769]: I0131 16:45:35.254006 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5hqfx\" (UniqueName: \"kubernetes.io/projected/b38780b5-a260-49f0-853b-5532643cc9c7-kube-api-access-5hqfx\") on node \"crc\" DevicePath \"\"" Jan 31 16:45:35 crc kubenswrapper[4769]: I0131 16:45:35.627008 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/keystone-db-create-k4978" event={"ID":"b38780b5-a260-49f0-853b-5532643cc9c7","Type":"ContainerDied","Data":"0e67fd82f56da50fbced0856ce64d9ae182726e14c7f1e80e17b109e78de1ef6"} Jan 31 16:45:35 crc kubenswrapper[4769]: I0131 16:45:35.627064 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0e67fd82f56da50fbced0856ce64d9ae182726e14c7f1e80e17b109e78de1ef6" Jan 31 16:45:35 crc kubenswrapper[4769]: I0131 16:45:35.627151 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/keystone-db-create-k4978" Jan 31 16:45:35 crc kubenswrapper[4769]: E0131 16:45:35.631284 4769 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode0b17231_5c20_443c_8fc8_6099f8d88e96.slice/crio-conmon-052582a02e952a0d1accf2090ab0b97b65e35a95952aca55656db67ff499bc5c.scope\": RecentStats: unable to find data in memory cache]" Jan 31 16:45:35 crc kubenswrapper[4769]: I0131 16:45:35.631690 4769 generic.go:334] "Generic (PLEG): container finished" podID="e0b17231-5c20-443c-8fc8-6099f8d88e96" containerID="052582a02e952a0d1accf2090ab0b97b65e35a95952aca55656db67ff499bc5c" exitCode=0 Jan 31 16:45:35 crc kubenswrapper[4769]: I0131 16:45:35.631759 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/55c7a49163ba348c10e2be21119f4ca8799dffa34873699cfe8f8b6d7bnwqd7" event={"ID":"e0b17231-5c20-443c-8fc8-6099f8d88e96","Type":"ContainerDied","Data":"052582a02e952a0d1accf2090ab0b97b65e35a95952aca55656db67ff499bc5c"} Jan 31 16:45:35 crc kubenswrapper[4769]: I0131 16:45:35.634108 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/keystone-dc33-account-create-update-mlwkh" event={"ID":"7dbac680-3b56-4116-8a9c-e41655dce740","Type":"ContainerDied","Data":"4502cb8a6720b07d21140f4d4073b8fce32af6436b9d375bd143ab6f3abaedc0"} Jan 31 16:45:35 crc kubenswrapper[4769]: I0131 16:45:35.634139 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/keystone-dc33-account-create-update-mlwkh" Jan 31 16:45:35 crc kubenswrapper[4769]: I0131 16:45:35.634158 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4502cb8a6720b07d21140f4d4073b8fce32af6436b9d375bd143ab6f3abaedc0" Jan 31 16:45:36 crc kubenswrapper[4769]: I0131 16:45:36.669958 4769 generic.go:334] "Generic (PLEG): container finished" podID="e0b17231-5c20-443c-8fc8-6099f8d88e96" containerID="d0182329ca5db559266e64502225b28318391e5465f5ccab3dbac226e45b28ce" exitCode=0 Jan 31 16:45:36 crc kubenswrapper[4769]: I0131 16:45:36.671268 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/55c7a49163ba348c10e2be21119f4ca8799dffa34873699cfe8f8b6d7bnwqd7" event={"ID":"e0b17231-5c20-443c-8fc8-6099f8d88e96","Type":"ContainerDied","Data":"d0182329ca5db559266e64502225b28318391e5465f5ccab3dbac226e45b28ce"} Jan 31 16:45:37 crc kubenswrapper[4769]: I0131 16:45:37.736025 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/keystone-db-sync-25psv"] Jan 31 16:45:37 crc kubenswrapper[4769]: E0131 16:45:37.737265 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7dbac680-3b56-4116-8a9c-e41655dce740" containerName="mariadb-account-create-update" Jan 31 16:45:37 crc kubenswrapper[4769]: I0131 16:45:37.737301 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="7dbac680-3b56-4116-8a9c-e41655dce740" containerName="mariadb-account-create-update" Jan 31 16:45:37 crc kubenswrapper[4769]: E0131 16:45:37.737333 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b38780b5-a260-49f0-853b-5532643cc9c7" containerName="mariadb-database-create" Jan 31 16:45:37 crc kubenswrapper[4769]: I0131 16:45:37.737347 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="b38780b5-a260-49f0-853b-5532643cc9c7" containerName="mariadb-database-create" Jan 31 16:45:37 crc kubenswrapper[4769]: I0131 16:45:37.737620 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="b38780b5-a260-49f0-853b-5532643cc9c7" containerName="mariadb-database-create" Jan 31 16:45:37 crc kubenswrapper[4769]: I0131 16:45:37.737650 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="7dbac680-3b56-4116-8a9c-e41655dce740" containerName="mariadb-account-create-update" Jan 31 16:45:37 crc kubenswrapper[4769]: I0131 16:45:37.738391 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/keystone-db-sync-25psv" Jan 31 16:45:37 crc kubenswrapper[4769]: I0131 16:45:37.740891 4769 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"keystone" Jan 31 16:45:37 crc kubenswrapper[4769]: I0131 16:45:37.740947 4769 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"keystone-config-data" Jan 31 16:45:37 crc kubenswrapper[4769]: I0131 16:45:37.742912 4769 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"keystone-keystone-dockercfg-wdtvk" Jan 31 16:45:37 crc kubenswrapper[4769]: I0131 16:45:37.744119 4769 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"keystone-scripts" Jan 31 16:45:37 crc kubenswrapper[4769]: I0131 16:45:37.752736 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/keystone-db-sync-25psv"] Jan 31 16:45:37 crc kubenswrapper[4769]: I0131 16:45:37.888314 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h7vdn\" (UniqueName: \"kubernetes.io/projected/70c8524d-a0d7-40ce-8fcf-070cf0348c60-kube-api-access-h7vdn\") pod \"keystone-db-sync-25psv\" (UID: \"70c8524d-a0d7-40ce-8fcf-070cf0348c60\") " pod="swift-kuttl-tests/keystone-db-sync-25psv" Jan 31 16:45:37 crc kubenswrapper[4769]: I0131 16:45:37.888410 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70c8524d-a0d7-40ce-8fcf-070cf0348c60-config-data\") pod \"keystone-db-sync-25psv\" (UID: \"70c8524d-a0d7-40ce-8fcf-070cf0348c60\") " pod="swift-kuttl-tests/keystone-db-sync-25psv" Jan 31 16:45:37 crc kubenswrapper[4769]: I0131 16:45:37.990185 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h7vdn\" (UniqueName: \"kubernetes.io/projected/70c8524d-a0d7-40ce-8fcf-070cf0348c60-kube-api-access-h7vdn\") pod \"keystone-db-sync-25psv\" (UID: \"70c8524d-a0d7-40ce-8fcf-070cf0348c60\") " pod="swift-kuttl-tests/keystone-db-sync-25psv" Jan 31 16:45:37 crc kubenswrapper[4769]: I0131 16:45:37.990905 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70c8524d-a0d7-40ce-8fcf-070cf0348c60-config-data\") pod \"keystone-db-sync-25psv\" (UID: \"70c8524d-a0d7-40ce-8fcf-070cf0348c60\") " pod="swift-kuttl-tests/keystone-db-sync-25psv" Jan 31 16:45:38 crc kubenswrapper[4769]: I0131 16:45:38.010480 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70c8524d-a0d7-40ce-8fcf-070cf0348c60-config-data\") pod \"keystone-db-sync-25psv\" (UID: \"70c8524d-a0d7-40ce-8fcf-070cf0348c60\") " pod="swift-kuttl-tests/keystone-db-sync-25psv" Jan 31 16:45:38 crc kubenswrapper[4769]: I0131 16:45:38.023030 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h7vdn\" (UniqueName: \"kubernetes.io/projected/70c8524d-a0d7-40ce-8fcf-070cf0348c60-kube-api-access-h7vdn\") pod \"keystone-db-sync-25psv\" (UID: \"70c8524d-a0d7-40ce-8fcf-070cf0348c60\") " pod="swift-kuttl-tests/keystone-db-sync-25psv" Jan 31 16:45:38 crc kubenswrapper[4769]: I0131 16:45:38.057610 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/keystone-db-sync-25psv" Jan 31 16:45:38 crc kubenswrapper[4769]: I0131 16:45:38.067307 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/55c7a49163ba348c10e2be21119f4ca8799dffa34873699cfe8f8b6d7bnwqd7" Jan 31 16:45:38 crc kubenswrapper[4769]: I0131 16:45:38.193766 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e0b17231-5c20-443c-8fc8-6099f8d88e96-util\") pod \"e0b17231-5c20-443c-8fc8-6099f8d88e96\" (UID: \"e0b17231-5c20-443c-8fc8-6099f8d88e96\") " Jan 31 16:45:38 crc kubenswrapper[4769]: I0131 16:45:38.193838 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e0b17231-5c20-443c-8fc8-6099f8d88e96-bundle\") pod \"e0b17231-5c20-443c-8fc8-6099f8d88e96\" (UID: \"e0b17231-5c20-443c-8fc8-6099f8d88e96\") " Jan 31 16:45:38 crc kubenswrapper[4769]: I0131 16:45:38.193879 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d88w2\" (UniqueName: \"kubernetes.io/projected/e0b17231-5c20-443c-8fc8-6099f8d88e96-kube-api-access-d88w2\") pod \"e0b17231-5c20-443c-8fc8-6099f8d88e96\" (UID: \"e0b17231-5c20-443c-8fc8-6099f8d88e96\") " Jan 31 16:45:38 crc kubenswrapper[4769]: I0131 16:45:38.195478 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e0b17231-5c20-443c-8fc8-6099f8d88e96-bundle" (OuterVolumeSpecName: "bundle") pod "e0b17231-5c20-443c-8fc8-6099f8d88e96" (UID: "e0b17231-5c20-443c-8fc8-6099f8d88e96"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:45:38 crc kubenswrapper[4769]: I0131 16:45:38.200443 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e0b17231-5c20-443c-8fc8-6099f8d88e96-kube-api-access-d88w2" (OuterVolumeSpecName: "kube-api-access-d88w2") pod "e0b17231-5c20-443c-8fc8-6099f8d88e96" (UID: "e0b17231-5c20-443c-8fc8-6099f8d88e96"). InnerVolumeSpecName "kube-api-access-d88w2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:45:38 crc kubenswrapper[4769]: I0131 16:45:38.208835 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e0b17231-5c20-443c-8fc8-6099f8d88e96-util" (OuterVolumeSpecName: "util") pod "e0b17231-5c20-443c-8fc8-6099f8d88e96" (UID: "e0b17231-5c20-443c-8fc8-6099f8d88e96"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:45:38 crc kubenswrapper[4769]: I0131 16:45:38.295631 4769 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e0b17231-5c20-443c-8fc8-6099f8d88e96-util\") on node \"crc\" DevicePath \"\"" Jan 31 16:45:38 crc kubenswrapper[4769]: I0131 16:45:38.295661 4769 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e0b17231-5c20-443c-8fc8-6099f8d88e96-bundle\") on node \"crc\" DevicePath \"\"" Jan 31 16:45:38 crc kubenswrapper[4769]: I0131 16:45:38.295672 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d88w2\" (UniqueName: \"kubernetes.io/projected/e0b17231-5c20-443c-8fc8-6099f8d88e96-kube-api-access-d88w2\") on node \"crc\" DevicePath \"\"" Jan 31 16:45:38 crc kubenswrapper[4769]: I0131 16:45:38.500611 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/keystone-db-sync-25psv"] Jan 31 16:45:38 crc kubenswrapper[4769]: I0131 16:45:38.686406 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/55c7a49163ba348c10e2be21119f4ca8799dffa34873699cfe8f8b6d7bnwqd7" event={"ID":"e0b17231-5c20-443c-8fc8-6099f8d88e96","Type":"ContainerDied","Data":"9c37cd2c3f47001ba5294bc4e36de40d3d64f4c7db10a045c44e4959a62e92ea"} Jan 31 16:45:38 crc kubenswrapper[4769]: I0131 16:45:38.686447 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9c37cd2c3f47001ba5294bc4e36de40d3d64f4c7db10a045c44e4959a62e92ea" Jan 31 16:45:38 crc kubenswrapper[4769]: I0131 16:45:38.686525 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/55c7a49163ba348c10e2be21119f4ca8799dffa34873699cfe8f8b6d7bnwqd7" Jan 31 16:45:38 crc kubenswrapper[4769]: I0131 16:45:38.687301 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/keystone-db-sync-25psv" event={"ID":"70c8524d-a0d7-40ce-8fcf-070cf0348c60","Type":"ContainerStarted","Data":"47c24f4bce8ed096b08ad8ac169189dd1937a6129fb47a8b0d4541d75113a6d2"} Jan 31 16:45:46 crc kubenswrapper[4769]: I0131 16:45:46.826410 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-5bfd447ff5-zwcfv"] Jan 31 16:45:46 crc kubenswrapper[4769]: E0131 16:45:46.827196 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0b17231-5c20-443c-8fc8-6099f8d88e96" containerName="util" Jan 31 16:45:46 crc kubenswrapper[4769]: I0131 16:45:46.827209 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0b17231-5c20-443c-8fc8-6099f8d88e96" containerName="util" Jan 31 16:45:46 crc kubenswrapper[4769]: E0131 16:45:46.827221 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0b17231-5c20-443c-8fc8-6099f8d88e96" containerName="extract" Jan 31 16:45:46 crc kubenswrapper[4769]: I0131 16:45:46.827227 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0b17231-5c20-443c-8fc8-6099f8d88e96" containerName="extract" Jan 31 16:45:46 crc kubenswrapper[4769]: E0131 16:45:46.827238 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0b17231-5c20-443c-8fc8-6099f8d88e96" containerName="pull" Jan 31 16:45:46 crc kubenswrapper[4769]: I0131 16:45:46.827244 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0b17231-5c20-443c-8fc8-6099f8d88e96" containerName="pull" Jan 31 16:45:46 crc kubenswrapper[4769]: I0131 16:45:46.827370 4769 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="e0b17231-5c20-443c-8fc8-6099f8d88e96" containerName="extract" Jan 31 16:45:46 crc kubenswrapper[4769]: I0131 16:45:46.827770 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-5bfd447ff5-zwcfv" Jan 31 16:45:46 crc kubenswrapper[4769]: I0131 16:45:46.829403 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-4grmm" Jan 31 16:45:46 crc kubenswrapper[4769]: I0131 16:45:46.829735 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-service-cert" Jan 31 16:45:46 crc kubenswrapper[4769]: I0131 16:45:46.838035 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-5bfd447ff5-zwcfv"] Jan 31 16:45:46 crc kubenswrapper[4769]: I0131 16:45:46.929340 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9gmlv\" (UniqueName: \"kubernetes.io/projected/a992acae-b11b-404d-b2c1-9b26f97725e8-kube-api-access-9gmlv\") pod \"barbican-operator-controller-manager-5bfd447ff5-zwcfv\" (UID: \"a992acae-b11b-404d-b2c1-9b26f97725e8\") " pod="openstack-operators/barbican-operator-controller-manager-5bfd447ff5-zwcfv" Jan 31 16:45:46 crc kubenswrapper[4769]: I0131 16:45:46.929527 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a992acae-b11b-404d-b2c1-9b26f97725e8-apiservice-cert\") pod \"barbican-operator-controller-manager-5bfd447ff5-zwcfv\" (UID: \"a992acae-b11b-404d-b2c1-9b26f97725e8\") " pod="openstack-operators/barbican-operator-controller-manager-5bfd447ff5-zwcfv" Jan 31 16:45:46 crc kubenswrapper[4769]: I0131 16:45:46.929607 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a992acae-b11b-404d-b2c1-9b26f97725e8-webhook-cert\") pod \"barbican-operator-controller-manager-5bfd447ff5-zwcfv\" (UID: \"a992acae-b11b-404d-b2c1-9b26f97725e8\") " pod="openstack-operators/barbican-operator-controller-manager-5bfd447ff5-zwcfv" Jan 31 16:45:47 crc kubenswrapper[4769]: I0131 16:45:47.032172 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a992acae-b11b-404d-b2c1-9b26f97725e8-apiservice-cert\") pod \"barbican-operator-controller-manager-5bfd447ff5-zwcfv\" (UID: \"a992acae-b11b-404d-b2c1-9b26f97725e8\") " pod="openstack-operators/barbican-operator-controller-manager-5bfd447ff5-zwcfv" Jan 31 16:45:47 crc kubenswrapper[4769]: I0131 16:45:47.032283 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a992acae-b11b-404d-b2c1-9b26f97725e8-webhook-cert\") pod \"barbican-operator-controller-manager-5bfd447ff5-zwcfv\" (UID: \"a992acae-b11b-404d-b2c1-9b26f97725e8\") " pod="openstack-operators/barbican-operator-controller-manager-5bfd447ff5-zwcfv" Jan 31 16:45:47 crc kubenswrapper[4769]: I0131 16:45:47.032356 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9gmlv\" (UniqueName: \"kubernetes.io/projected/a992acae-b11b-404d-b2c1-9b26f97725e8-kube-api-access-9gmlv\") pod 
\"barbican-operator-controller-manager-5bfd447ff5-zwcfv\" (UID: \"a992acae-b11b-404d-b2c1-9b26f97725e8\") " pod="openstack-operators/barbican-operator-controller-manager-5bfd447ff5-zwcfv" Jan 31 16:45:47 crc kubenswrapper[4769]: I0131 16:45:47.040370 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a992acae-b11b-404d-b2c1-9b26f97725e8-apiservice-cert\") pod \"barbican-operator-controller-manager-5bfd447ff5-zwcfv\" (UID: \"a992acae-b11b-404d-b2c1-9b26f97725e8\") " pod="openstack-operators/barbican-operator-controller-manager-5bfd447ff5-zwcfv" Jan 31 16:45:47 crc kubenswrapper[4769]: I0131 16:45:47.050948 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a992acae-b11b-404d-b2c1-9b26f97725e8-webhook-cert\") pod \"barbican-operator-controller-manager-5bfd447ff5-zwcfv\" (UID: \"a992acae-b11b-404d-b2c1-9b26f97725e8\") " pod="openstack-operators/barbican-operator-controller-manager-5bfd447ff5-zwcfv" Jan 31 16:45:47 crc kubenswrapper[4769]: I0131 16:45:47.051425 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9gmlv\" (UniqueName: \"kubernetes.io/projected/a992acae-b11b-404d-b2c1-9b26f97725e8-kube-api-access-9gmlv\") pod \"barbican-operator-controller-manager-5bfd447ff5-zwcfv\" (UID: \"a992acae-b11b-404d-b2c1-9b26f97725e8\") " pod="openstack-operators/barbican-operator-controller-manager-5bfd447ff5-zwcfv" Jan 31 16:45:47 crc kubenswrapper[4769]: I0131 16:45:47.142818 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-5bfd447ff5-zwcfv" Jan 31 16:45:49 crc kubenswrapper[4769]: I0131 16:45:49.113221 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-5bfd447ff5-zwcfv"] Jan 31 16:45:49 crc kubenswrapper[4769]: W0131 16:45:49.117607 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda992acae_b11b_404d_b2c1_9b26f97725e8.slice/crio-c2d67d7f7e8b35b958949fdd9b5ab00853c5aad74d4d6ee1ad98ed85ceb23dc6 WatchSource:0}: Error finding container c2d67d7f7e8b35b958949fdd9b5ab00853c5aad74d4d6ee1ad98ed85ceb23dc6: Status 404 returned error can't find the container with id c2d67d7f7e8b35b958949fdd9b5ab00853c5aad74d4d6ee1ad98ed85ceb23dc6 Jan 31 16:45:49 crc kubenswrapper[4769]: I0131 16:45:49.771126 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-5bfd447ff5-zwcfv" event={"ID":"a992acae-b11b-404d-b2c1-9b26f97725e8","Type":"ContainerStarted","Data":"c2d67d7f7e8b35b958949fdd9b5ab00853c5aad74d4d6ee1ad98ed85ceb23dc6"} Jan 31 16:45:49 crc kubenswrapper[4769]: I0131 16:45:49.772517 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/keystone-db-sync-25psv" event={"ID":"70c8524d-a0d7-40ce-8fcf-070cf0348c60","Type":"ContainerStarted","Data":"213308783e2a3f64ff3763f966ed828e22e23274f00477b8a4850379f417410e"} Jan 31 16:45:51 crc kubenswrapper[4769]: I0131 16:45:51.786457 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-5bfd447ff5-zwcfv" event={"ID":"a992acae-b11b-404d-b2c1-9b26f97725e8","Type":"ContainerStarted","Data":"b5ea2faf3e06142a1963e5066cfee915eb84b949cb7fae121f3878dffbabd62f"} Jan 31 16:45:51 crc kubenswrapper[4769]: I0131 16:45:51.786649 
4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-5bfd447ff5-zwcfv" Jan 31 16:45:51 crc kubenswrapper[4769]: I0131 16:45:51.806190 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-5bfd447ff5-zwcfv" podStartSLOduration=3.929582711 podStartE2EDuration="5.806174282s" podCreationTimestamp="2026-01-31 16:45:46 +0000 UTC" firstStartedPulling="2026-01-31 16:45:49.119680354 +0000 UTC m=+997.193849023" lastFinishedPulling="2026-01-31 16:45:50.996271925 +0000 UTC m=+999.070440594" observedRunningTime="2026-01-31 16:45:51.802862053 +0000 UTC m=+999.877030722" watchObservedRunningTime="2026-01-31 16:45:51.806174282 +0000 UTC m=+999.880343031" Jan 31 16:45:51 crc kubenswrapper[4769]: I0131 16:45:51.806541 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/keystone-db-sync-25psv" podStartSLOduration=4.545188252 podStartE2EDuration="14.806537252s" podCreationTimestamp="2026-01-31 16:45:37 +0000 UTC" firstStartedPulling="2026-01-31 16:45:38.505551106 +0000 UTC m=+986.579719815" lastFinishedPulling="2026-01-31 16:45:48.766900146 +0000 UTC m=+996.841068815" observedRunningTime="2026-01-31 16:45:49.793255225 +0000 UTC m=+997.867423914" watchObservedRunningTime="2026-01-31 16:45:51.806537252 +0000 UTC m=+999.880705921" Jan 31 16:45:52 crc kubenswrapper[4769]: I0131 16:45:52.796729 4769 generic.go:334] "Generic (PLEG): container finished" podID="70c8524d-a0d7-40ce-8fcf-070cf0348c60" containerID="213308783e2a3f64ff3763f966ed828e22e23274f00477b8a4850379f417410e" exitCode=0 Jan 31 16:45:52 crc kubenswrapper[4769]: I0131 16:45:52.796826 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/keystone-db-sync-25psv" event={"ID":"70c8524d-a0d7-40ce-8fcf-070cf0348c60","Type":"ContainerDied","Data":"213308783e2a3f64ff3763f966ed828e22e23274f00477b8a4850379f417410e"} Jan 31 16:45:54 crc kubenswrapper[4769]: I0131 16:45:54.163077 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/keystone-db-sync-25psv" Jan 31 16:45:54 crc kubenswrapper[4769]: I0131 16:45:54.257632 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h7vdn\" (UniqueName: \"kubernetes.io/projected/70c8524d-a0d7-40ce-8fcf-070cf0348c60-kube-api-access-h7vdn\") pod \"70c8524d-a0d7-40ce-8fcf-070cf0348c60\" (UID: \"70c8524d-a0d7-40ce-8fcf-070cf0348c60\") " Jan 31 16:45:54 crc kubenswrapper[4769]: I0131 16:45:54.257790 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70c8524d-a0d7-40ce-8fcf-070cf0348c60-config-data\") pod \"70c8524d-a0d7-40ce-8fcf-070cf0348c60\" (UID: \"70c8524d-a0d7-40ce-8fcf-070cf0348c60\") " Jan 31 16:45:54 crc kubenswrapper[4769]: I0131 16:45:54.263175 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/70c8524d-a0d7-40ce-8fcf-070cf0348c60-kube-api-access-h7vdn" (OuterVolumeSpecName: "kube-api-access-h7vdn") pod "70c8524d-a0d7-40ce-8fcf-070cf0348c60" (UID: "70c8524d-a0d7-40ce-8fcf-070cf0348c60"). InnerVolumeSpecName "kube-api-access-h7vdn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:45:54 crc kubenswrapper[4769]: I0131 16:45:54.299242 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/70c8524d-a0d7-40ce-8fcf-070cf0348c60-config-data" (OuterVolumeSpecName: "config-data") pod "70c8524d-a0d7-40ce-8fcf-070cf0348c60" (UID: "70c8524d-a0d7-40ce-8fcf-070cf0348c60"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:45:54 crc kubenswrapper[4769]: I0131 16:45:54.359463 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h7vdn\" (UniqueName: \"kubernetes.io/projected/70c8524d-a0d7-40ce-8fcf-070cf0348c60-kube-api-access-h7vdn\") on node \"crc\" DevicePath \"\"" Jan 31 16:45:54 crc kubenswrapper[4769]: I0131 16:45:54.359533 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70c8524d-a0d7-40ce-8fcf-070cf0348c60-config-data\") on node \"crc\" DevicePath \"\"" Jan 31 16:45:54 crc kubenswrapper[4769]: I0131 16:45:54.812737 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/keystone-db-sync-25psv" event={"ID":"70c8524d-a0d7-40ce-8fcf-070cf0348c60","Type":"ContainerDied","Data":"47c24f4bce8ed096b08ad8ac169189dd1937a6129fb47a8b0d4541d75113a6d2"} Jan 31 16:45:54 crc kubenswrapper[4769]: I0131 16:45:54.813102 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="47c24f4bce8ed096b08ad8ac169189dd1937a6129fb47a8b0d4541d75113a6d2" Jan 31 16:45:54 crc kubenswrapper[4769]: I0131 16:45:54.812814 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/keystone-db-sync-25psv" Jan 31 16:45:55 crc kubenswrapper[4769]: I0131 16:45:55.007321 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/keystone-bootstrap-9xtg8"] Jan 31 16:45:55 crc kubenswrapper[4769]: E0131 16:45:55.007874 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70c8524d-a0d7-40ce-8fcf-070cf0348c60" containerName="keystone-db-sync" Jan 31 16:45:55 crc kubenswrapper[4769]: I0131 16:45:55.007964 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="70c8524d-a0d7-40ce-8fcf-070cf0348c60" containerName="keystone-db-sync" Jan 31 16:45:55 crc kubenswrapper[4769]: I0131 16:45:55.008189 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="70c8524d-a0d7-40ce-8fcf-070cf0348c60" containerName="keystone-db-sync" Jan 31 16:45:55 crc kubenswrapper[4769]: I0131 16:45:55.008795 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/keystone-bootstrap-9xtg8" Jan 31 16:45:55 crc kubenswrapper[4769]: I0131 16:45:55.011322 4769 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"keystone-config-data" Jan 31 16:45:55 crc kubenswrapper[4769]: I0131 16:45:55.011449 4769 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"keystone" Jan 31 16:45:55 crc kubenswrapper[4769]: I0131 16:45:55.011686 4769 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"osp-secret" Jan 31 16:45:55 crc kubenswrapper[4769]: I0131 16:45:55.012544 4769 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"keystone-keystone-dockercfg-wdtvk" Jan 31 16:45:55 crc kubenswrapper[4769]: I0131 16:45:55.018420 4769 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"keystone-scripts" Jan 31 16:45:55 crc kubenswrapper[4769]: I0131 16:45:55.029781 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/keystone-bootstrap-9xtg8"] Jan 31 16:45:55 crc kubenswrapper[4769]: I0131 16:45:55.070228 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l7dfp\" (UniqueName: \"kubernetes.io/projected/36f36d22-0f23-4385-af4a-d31963ab0dbd-kube-api-access-l7dfp\") pod \"keystone-bootstrap-9xtg8\" (UID: \"36f36d22-0f23-4385-af4a-d31963ab0dbd\") " pod="swift-kuttl-tests/keystone-bootstrap-9xtg8" Jan 31 16:45:55 crc kubenswrapper[4769]: I0131 16:45:55.070294 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/36f36d22-0f23-4385-af4a-d31963ab0dbd-scripts\") pod \"keystone-bootstrap-9xtg8\" (UID: \"36f36d22-0f23-4385-af4a-d31963ab0dbd\") " pod="swift-kuttl-tests/keystone-bootstrap-9xtg8" Jan 31 16:45:55 crc kubenswrapper[4769]: I0131 16:45:55.070319 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/36f36d22-0f23-4385-af4a-d31963ab0dbd-credential-keys\") pod \"keystone-bootstrap-9xtg8\" (UID: \"36f36d22-0f23-4385-af4a-d31963ab0dbd\") " pod="swift-kuttl-tests/keystone-bootstrap-9xtg8" Jan 31 16:45:55 crc kubenswrapper[4769]: I0131 16:45:55.070346 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/36f36d22-0f23-4385-af4a-d31963ab0dbd-fernet-keys\") pod \"keystone-bootstrap-9xtg8\" (UID: \"36f36d22-0f23-4385-af4a-d31963ab0dbd\") " pod="swift-kuttl-tests/keystone-bootstrap-9xtg8" Jan 31 16:45:55 crc kubenswrapper[4769]: I0131 16:45:55.070518 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/36f36d22-0f23-4385-af4a-d31963ab0dbd-config-data\") pod \"keystone-bootstrap-9xtg8\" (UID: \"36f36d22-0f23-4385-af4a-d31963ab0dbd\") " pod="swift-kuttl-tests/keystone-bootstrap-9xtg8" Jan 31 16:45:55 crc kubenswrapper[4769]: I0131 16:45:55.171639 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/36f36d22-0f23-4385-af4a-d31963ab0dbd-config-data\") pod \"keystone-bootstrap-9xtg8\" (UID: \"36f36d22-0f23-4385-af4a-d31963ab0dbd\") " pod="swift-kuttl-tests/keystone-bootstrap-9xtg8" Jan 31 16:45:55 crc kubenswrapper[4769]: I0131 16:45:55.171702 
4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l7dfp\" (UniqueName: \"kubernetes.io/projected/36f36d22-0f23-4385-af4a-d31963ab0dbd-kube-api-access-l7dfp\") pod \"keystone-bootstrap-9xtg8\" (UID: \"36f36d22-0f23-4385-af4a-d31963ab0dbd\") " pod="swift-kuttl-tests/keystone-bootstrap-9xtg8" Jan 31 16:45:55 crc kubenswrapper[4769]: I0131 16:45:55.171726 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/36f36d22-0f23-4385-af4a-d31963ab0dbd-scripts\") pod \"keystone-bootstrap-9xtg8\" (UID: \"36f36d22-0f23-4385-af4a-d31963ab0dbd\") " pod="swift-kuttl-tests/keystone-bootstrap-9xtg8" Jan 31 16:45:55 crc kubenswrapper[4769]: I0131 16:45:55.171741 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/36f36d22-0f23-4385-af4a-d31963ab0dbd-credential-keys\") pod \"keystone-bootstrap-9xtg8\" (UID: \"36f36d22-0f23-4385-af4a-d31963ab0dbd\") " pod="swift-kuttl-tests/keystone-bootstrap-9xtg8" Jan 31 16:45:55 crc kubenswrapper[4769]: I0131 16:45:55.171761 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/36f36d22-0f23-4385-af4a-d31963ab0dbd-fernet-keys\") pod \"keystone-bootstrap-9xtg8\" (UID: \"36f36d22-0f23-4385-af4a-d31963ab0dbd\") " pod="swift-kuttl-tests/keystone-bootstrap-9xtg8" Jan 31 16:45:55 crc kubenswrapper[4769]: I0131 16:45:55.175156 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/36f36d22-0f23-4385-af4a-d31963ab0dbd-fernet-keys\") pod \"keystone-bootstrap-9xtg8\" (UID: \"36f36d22-0f23-4385-af4a-d31963ab0dbd\") " pod="swift-kuttl-tests/keystone-bootstrap-9xtg8" Jan 31 16:45:55 crc kubenswrapper[4769]: I0131 16:45:55.175939 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/36f36d22-0f23-4385-af4a-d31963ab0dbd-scripts\") pod \"keystone-bootstrap-9xtg8\" (UID: \"36f36d22-0f23-4385-af4a-d31963ab0dbd\") " pod="swift-kuttl-tests/keystone-bootstrap-9xtg8" Jan 31 16:45:55 crc kubenswrapper[4769]: I0131 16:45:55.176287 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/36f36d22-0f23-4385-af4a-d31963ab0dbd-config-data\") pod \"keystone-bootstrap-9xtg8\" (UID: \"36f36d22-0f23-4385-af4a-d31963ab0dbd\") " pod="swift-kuttl-tests/keystone-bootstrap-9xtg8" Jan 31 16:45:55 crc kubenswrapper[4769]: I0131 16:45:55.184852 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/36f36d22-0f23-4385-af4a-d31963ab0dbd-credential-keys\") pod \"keystone-bootstrap-9xtg8\" (UID: \"36f36d22-0f23-4385-af4a-d31963ab0dbd\") " pod="swift-kuttl-tests/keystone-bootstrap-9xtg8" Jan 31 16:45:55 crc kubenswrapper[4769]: I0131 16:45:55.186942 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l7dfp\" (UniqueName: \"kubernetes.io/projected/36f36d22-0f23-4385-af4a-d31963ab0dbd-kube-api-access-l7dfp\") pod \"keystone-bootstrap-9xtg8\" (UID: \"36f36d22-0f23-4385-af4a-d31963ab0dbd\") " pod="swift-kuttl-tests/keystone-bootstrap-9xtg8" Jan 31 16:45:55 crc kubenswrapper[4769]: I0131 16:45:55.331213 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/keystone-bootstrap-9xtg8" Jan 31 16:45:55 crc kubenswrapper[4769]: I0131 16:45:55.765485 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/keystone-bootstrap-9xtg8"] Jan 31 16:45:55 crc kubenswrapper[4769]: W0131 16:45:55.773455 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod36f36d22_0f23_4385_af4a_d31963ab0dbd.slice/crio-7be544b07e1c782fc77a63094d06cc01e7f8b3bf2dae42708f7ac5389429f2eb WatchSource:0}: Error finding container 7be544b07e1c782fc77a63094d06cc01e7f8b3bf2dae42708f7ac5389429f2eb: Status 404 returned error can't find the container with id 7be544b07e1c782fc77a63094d06cc01e7f8b3bf2dae42708f7ac5389429f2eb Jan 31 16:45:55 crc kubenswrapper[4769]: I0131 16:45:55.821453 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/keystone-bootstrap-9xtg8" event={"ID":"36f36d22-0f23-4385-af4a-d31963ab0dbd","Type":"ContainerStarted","Data":"7be544b07e1c782fc77a63094d06cc01e7f8b3bf2dae42708f7ac5389429f2eb"} Jan 31 16:45:56 crc kubenswrapper[4769]: I0131 16:45:56.832294 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/keystone-bootstrap-9xtg8" event={"ID":"36f36d22-0f23-4385-af4a-d31963ab0dbd","Type":"ContainerStarted","Data":"2ff432476c66bae5134c3131b0251fdf5759c0ffefd1aa82450118befb4e63ec"} Jan 31 16:45:56 crc kubenswrapper[4769]: I0131 16:45:56.864427 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/keystone-bootstrap-9xtg8" podStartSLOduration=2.864393106 podStartE2EDuration="2.864393106s" podCreationTimestamp="2026-01-31 16:45:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:45:56.852909079 +0000 UTC m=+1004.927077788" watchObservedRunningTime="2026-01-31 16:45:56.864393106 +0000 UTC m=+1004.938561815" Jan 31 16:45:57 crc kubenswrapper[4769]: I0131 16:45:57.148252 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-5bfd447ff5-zwcfv" Jan 31 16:45:58 crc kubenswrapper[4769]: I0131 16:45:58.848805 4769 generic.go:334] "Generic (PLEG): container finished" podID="36f36d22-0f23-4385-af4a-d31963ab0dbd" containerID="2ff432476c66bae5134c3131b0251fdf5759c0ffefd1aa82450118befb4e63ec" exitCode=0 Jan 31 16:45:58 crc kubenswrapper[4769]: I0131 16:45:58.848884 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/keystone-bootstrap-9xtg8" event={"ID":"36f36d22-0f23-4385-af4a-d31963ab0dbd","Type":"ContainerDied","Data":"2ff432476c66bae5134c3131b0251fdf5759c0ffefd1aa82450118befb4e63ec"} Jan 31 16:46:00 crc kubenswrapper[4769]: I0131 16:46:00.221186 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/keystone-bootstrap-9xtg8" Jan 31 16:46:00 crc kubenswrapper[4769]: I0131 16:46:00.264295 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/36f36d22-0f23-4385-af4a-d31963ab0dbd-credential-keys\") pod \"36f36d22-0f23-4385-af4a-d31963ab0dbd\" (UID: \"36f36d22-0f23-4385-af4a-d31963ab0dbd\") " Jan 31 16:46:00 crc kubenswrapper[4769]: I0131 16:46:00.264332 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/36f36d22-0f23-4385-af4a-d31963ab0dbd-fernet-keys\") pod \"36f36d22-0f23-4385-af4a-d31963ab0dbd\" (UID: \"36f36d22-0f23-4385-af4a-d31963ab0dbd\") " Jan 31 16:46:00 crc kubenswrapper[4769]: I0131 16:46:00.270251 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/36f36d22-0f23-4385-af4a-d31963ab0dbd-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "36f36d22-0f23-4385-af4a-d31963ab0dbd" (UID: "36f36d22-0f23-4385-af4a-d31963ab0dbd"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:46:00 crc kubenswrapper[4769]: I0131 16:46:00.270676 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/36f36d22-0f23-4385-af4a-d31963ab0dbd-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "36f36d22-0f23-4385-af4a-d31963ab0dbd" (UID: "36f36d22-0f23-4385-af4a-d31963ab0dbd"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:46:00 crc kubenswrapper[4769]: I0131 16:46:00.365455 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l7dfp\" (UniqueName: \"kubernetes.io/projected/36f36d22-0f23-4385-af4a-d31963ab0dbd-kube-api-access-l7dfp\") pod \"36f36d22-0f23-4385-af4a-d31963ab0dbd\" (UID: \"36f36d22-0f23-4385-af4a-d31963ab0dbd\") " Jan 31 16:46:00 crc kubenswrapper[4769]: I0131 16:46:00.365802 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/36f36d22-0f23-4385-af4a-d31963ab0dbd-config-data\") pod \"36f36d22-0f23-4385-af4a-d31963ab0dbd\" (UID: \"36f36d22-0f23-4385-af4a-d31963ab0dbd\") " Jan 31 16:46:00 crc kubenswrapper[4769]: I0131 16:46:00.365847 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/36f36d22-0f23-4385-af4a-d31963ab0dbd-scripts\") pod \"36f36d22-0f23-4385-af4a-d31963ab0dbd\" (UID: \"36f36d22-0f23-4385-af4a-d31963ab0dbd\") " Jan 31 16:46:00 crc kubenswrapper[4769]: I0131 16:46:00.366303 4769 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/36f36d22-0f23-4385-af4a-d31963ab0dbd-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 31 16:46:00 crc kubenswrapper[4769]: I0131 16:46:00.366337 4769 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/36f36d22-0f23-4385-af4a-d31963ab0dbd-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 31 16:46:00 crc kubenswrapper[4769]: I0131 16:46:00.369694 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/36f36d22-0f23-4385-af4a-d31963ab0dbd-scripts" (OuterVolumeSpecName: "scripts") pod "36f36d22-0f23-4385-af4a-d31963ab0dbd" (UID: "36f36d22-0f23-4385-af4a-d31963ab0dbd"). 
InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:46:00 crc kubenswrapper[4769]: I0131 16:46:00.372784 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/36f36d22-0f23-4385-af4a-d31963ab0dbd-kube-api-access-l7dfp" (OuterVolumeSpecName: "kube-api-access-l7dfp") pod "36f36d22-0f23-4385-af4a-d31963ab0dbd" (UID: "36f36d22-0f23-4385-af4a-d31963ab0dbd"). InnerVolumeSpecName "kube-api-access-l7dfp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:46:00 crc kubenswrapper[4769]: I0131 16:46:00.390128 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/36f36d22-0f23-4385-af4a-d31963ab0dbd-config-data" (OuterVolumeSpecName: "config-data") pod "36f36d22-0f23-4385-af4a-d31963ab0dbd" (UID: "36f36d22-0f23-4385-af4a-d31963ab0dbd"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:46:00 crc kubenswrapper[4769]: I0131 16:46:00.467206 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/36f36d22-0f23-4385-af4a-d31963ab0dbd-config-data\") on node \"crc\" DevicePath \"\"" Jan 31 16:46:00 crc kubenswrapper[4769]: I0131 16:46:00.467243 4769 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/36f36d22-0f23-4385-af4a-d31963ab0dbd-scripts\") on node \"crc\" DevicePath \"\"" Jan 31 16:46:00 crc kubenswrapper[4769]: I0131 16:46:00.467254 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l7dfp\" (UniqueName: \"kubernetes.io/projected/36f36d22-0f23-4385-af4a-d31963ab0dbd-kube-api-access-l7dfp\") on node \"crc\" DevicePath \"\"" Jan 31 16:46:00 crc kubenswrapper[4769]: I0131 16:46:00.869191 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/keystone-bootstrap-9xtg8" event={"ID":"36f36d22-0f23-4385-af4a-d31963ab0dbd","Type":"ContainerDied","Data":"7be544b07e1c782fc77a63094d06cc01e7f8b3bf2dae42708f7ac5389429f2eb"} Jan 31 16:46:00 crc kubenswrapper[4769]: I0131 16:46:00.869627 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7be544b07e1c782fc77a63094d06cc01e7f8b3bf2dae42708f7ac5389429f2eb" Jan 31 16:46:00 crc kubenswrapper[4769]: I0131 16:46:00.869319 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/keystone-bootstrap-9xtg8" Jan 31 16:46:01 crc kubenswrapper[4769]: I0131 16:46:01.310513 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/keystone-c9ff7c785-ktbq4"] Jan 31 16:46:01 crc kubenswrapper[4769]: E0131 16:46:01.310788 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36f36d22-0f23-4385-af4a-d31963ab0dbd" containerName="keystone-bootstrap" Jan 31 16:46:01 crc kubenswrapper[4769]: I0131 16:46:01.310805 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="36f36d22-0f23-4385-af4a-d31963ab0dbd" containerName="keystone-bootstrap" Jan 31 16:46:01 crc kubenswrapper[4769]: I0131 16:46:01.310954 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="36f36d22-0f23-4385-af4a-d31963ab0dbd" containerName="keystone-bootstrap" Jan 31 16:46:01 crc kubenswrapper[4769]: I0131 16:46:01.311652 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/keystone-c9ff7c785-ktbq4" Jan 31 16:46:01 crc kubenswrapper[4769]: I0131 16:46:01.313652 4769 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"keystone" Jan 31 16:46:01 crc kubenswrapper[4769]: I0131 16:46:01.313680 4769 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"keystone-keystone-dockercfg-wdtvk" Jan 31 16:46:01 crc kubenswrapper[4769]: I0131 16:46:01.314014 4769 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"keystone-config-data" Jan 31 16:46:01 crc kubenswrapper[4769]: I0131 16:46:01.321731 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/keystone-c9ff7c785-ktbq4"] Jan 31 16:46:01 crc kubenswrapper[4769]: I0131 16:46:01.325675 4769 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"keystone-scripts" Jan 31 16:46:01 crc kubenswrapper[4769]: I0131 16:46:01.381443 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zmqss\" (UniqueName: \"kubernetes.io/projected/8b05a639-8fd8-4f42-97e8-e946783ae05d-kube-api-access-zmqss\") pod \"keystone-c9ff7c785-ktbq4\" (UID: \"8b05a639-8fd8-4f42-97e8-e946783ae05d\") " pod="swift-kuttl-tests/keystone-c9ff7c785-ktbq4" Jan 31 16:46:01 crc kubenswrapper[4769]: I0131 16:46:01.381559 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8b05a639-8fd8-4f42-97e8-e946783ae05d-config-data\") pod \"keystone-c9ff7c785-ktbq4\" (UID: \"8b05a639-8fd8-4f42-97e8-e946783ae05d\") " pod="swift-kuttl-tests/keystone-c9ff7c785-ktbq4" Jan 31 16:46:01 crc kubenswrapper[4769]: I0131 16:46:01.381616 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8b05a639-8fd8-4f42-97e8-e946783ae05d-scripts\") pod \"keystone-c9ff7c785-ktbq4\" (UID: \"8b05a639-8fd8-4f42-97e8-e946783ae05d\") " pod="swift-kuttl-tests/keystone-c9ff7c785-ktbq4" Jan 31 16:46:01 crc kubenswrapper[4769]: I0131 16:46:01.381659 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/8b05a639-8fd8-4f42-97e8-e946783ae05d-fernet-keys\") pod \"keystone-c9ff7c785-ktbq4\" (UID: \"8b05a639-8fd8-4f42-97e8-e946783ae05d\") " pod="swift-kuttl-tests/keystone-c9ff7c785-ktbq4" Jan 31 16:46:01 crc kubenswrapper[4769]: I0131 16:46:01.381702 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/8b05a639-8fd8-4f42-97e8-e946783ae05d-credential-keys\") pod \"keystone-c9ff7c785-ktbq4\" (UID: \"8b05a639-8fd8-4f42-97e8-e946783ae05d\") " pod="swift-kuttl-tests/keystone-c9ff7c785-ktbq4" Jan 31 16:46:01 crc kubenswrapper[4769]: I0131 16:46:01.483092 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/8b05a639-8fd8-4f42-97e8-e946783ae05d-credential-keys\") pod \"keystone-c9ff7c785-ktbq4\" (UID: \"8b05a639-8fd8-4f42-97e8-e946783ae05d\") " pod="swift-kuttl-tests/keystone-c9ff7c785-ktbq4" Jan 31 16:46:01 crc kubenswrapper[4769]: I0131 16:46:01.483184 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zmqss\" (UniqueName: 
\"kubernetes.io/projected/8b05a639-8fd8-4f42-97e8-e946783ae05d-kube-api-access-zmqss\") pod \"keystone-c9ff7c785-ktbq4\" (UID: \"8b05a639-8fd8-4f42-97e8-e946783ae05d\") " pod="swift-kuttl-tests/keystone-c9ff7c785-ktbq4" Jan 31 16:46:01 crc kubenswrapper[4769]: I0131 16:46:01.483306 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8b05a639-8fd8-4f42-97e8-e946783ae05d-config-data\") pod \"keystone-c9ff7c785-ktbq4\" (UID: \"8b05a639-8fd8-4f42-97e8-e946783ae05d\") " pod="swift-kuttl-tests/keystone-c9ff7c785-ktbq4" Jan 31 16:46:01 crc kubenswrapper[4769]: I0131 16:46:01.483416 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8b05a639-8fd8-4f42-97e8-e946783ae05d-scripts\") pod \"keystone-c9ff7c785-ktbq4\" (UID: \"8b05a639-8fd8-4f42-97e8-e946783ae05d\") " pod="swift-kuttl-tests/keystone-c9ff7c785-ktbq4" Jan 31 16:46:01 crc kubenswrapper[4769]: I0131 16:46:01.483554 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/8b05a639-8fd8-4f42-97e8-e946783ae05d-fernet-keys\") pod \"keystone-c9ff7c785-ktbq4\" (UID: \"8b05a639-8fd8-4f42-97e8-e946783ae05d\") " pod="swift-kuttl-tests/keystone-c9ff7c785-ktbq4" Jan 31 16:46:01 crc kubenswrapper[4769]: I0131 16:46:01.489940 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/8b05a639-8fd8-4f42-97e8-e946783ae05d-credential-keys\") pod \"keystone-c9ff7c785-ktbq4\" (UID: \"8b05a639-8fd8-4f42-97e8-e946783ae05d\") " pod="swift-kuttl-tests/keystone-c9ff7c785-ktbq4" Jan 31 16:46:01 crc kubenswrapper[4769]: I0131 16:46:01.490596 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8b05a639-8fd8-4f42-97e8-e946783ae05d-config-data\") pod \"keystone-c9ff7c785-ktbq4\" (UID: \"8b05a639-8fd8-4f42-97e8-e946783ae05d\") " pod="swift-kuttl-tests/keystone-c9ff7c785-ktbq4" Jan 31 16:46:01 crc kubenswrapper[4769]: I0131 16:46:01.491255 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8b05a639-8fd8-4f42-97e8-e946783ae05d-scripts\") pod \"keystone-c9ff7c785-ktbq4\" (UID: \"8b05a639-8fd8-4f42-97e8-e946783ae05d\") " pod="swift-kuttl-tests/keystone-c9ff7c785-ktbq4" Jan 31 16:46:01 crc kubenswrapper[4769]: I0131 16:46:01.497624 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/8b05a639-8fd8-4f42-97e8-e946783ae05d-fernet-keys\") pod \"keystone-c9ff7c785-ktbq4\" (UID: \"8b05a639-8fd8-4f42-97e8-e946783ae05d\") " pod="swift-kuttl-tests/keystone-c9ff7c785-ktbq4" Jan 31 16:46:01 crc kubenswrapper[4769]: I0131 16:46:01.500784 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zmqss\" (UniqueName: \"kubernetes.io/projected/8b05a639-8fd8-4f42-97e8-e946783ae05d-kube-api-access-zmqss\") pod \"keystone-c9ff7c785-ktbq4\" (UID: \"8b05a639-8fd8-4f42-97e8-e946783ae05d\") " pod="swift-kuttl-tests/keystone-c9ff7c785-ktbq4" Jan 31 16:46:01 crc kubenswrapper[4769]: I0131 16:46:01.633239 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/keystone-c9ff7c785-ktbq4" Jan 31 16:46:02 crc kubenswrapper[4769]: I0131 16:46:02.032788 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/keystone-c9ff7c785-ktbq4"] Jan 31 16:46:02 crc kubenswrapper[4769]: I0131 16:46:02.884609 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/keystone-c9ff7c785-ktbq4" event={"ID":"8b05a639-8fd8-4f42-97e8-e946783ae05d","Type":"ContainerStarted","Data":"ce9a7a84376e04e96dcceda70a4763915d44d6e5931635b34a43067a21f041a4"} Jan 31 16:46:02 crc kubenswrapper[4769]: I0131 16:46:02.885106 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/keystone-c9ff7c785-ktbq4" Jan 31 16:46:02 crc kubenswrapper[4769]: I0131 16:46:02.885169 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/keystone-c9ff7c785-ktbq4" event={"ID":"8b05a639-8fd8-4f42-97e8-e946783ae05d","Type":"ContainerStarted","Data":"b4e0f5ce11ba5905c2667d4f414927f3fb61ce398115daab03aa34accbcce8c8"} Jan 31 16:46:02 crc kubenswrapper[4769]: I0131 16:46:02.909784 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/keystone-c9ff7c785-ktbq4" podStartSLOduration=1.909754472 podStartE2EDuration="1.909754472s" podCreationTimestamp="2026-01-31 16:46:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:46:02.904365378 +0000 UTC m=+1010.978534067" watchObservedRunningTime="2026-01-31 16:46:02.909754472 +0000 UTC m=+1010.983923181" Jan 31 16:46:03 crc kubenswrapper[4769]: I0131 16:46:03.523656 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/barbican-a94c-account-create-update-xwnsf"] Jan 31 16:46:03 crc kubenswrapper[4769]: I0131 16:46:03.524831 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/barbican-a94c-account-create-update-xwnsf" Jan 31 16:46:03 crc kubenswrapper[4769]: I0131 16:46:03.529689 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/barbican-db-create-8hxbg"] Jan 31 16:46:03 crc kubenswrapper[4769]: I0131 16:46:03.530144 4769 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"barbican-db-secret" Jan 31 16:46:03 crc kubenswrapper[4769]: I0131 16:46:03.530859 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/barbican-db-create-8hxbg" Jan 31 16:46:03 crc kubenswrapper[4769]: I0131 16:46:03.536122 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/barbican-a94c-account-create-update-xwnsf"] Jan 31 16:46:03 crc kubenswrapper[4769]: I0131 16:46:03.545208 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/barbican-db-create-8hxbg"] Jan 31 16:46:03 crc kubenswrapper[4769]: I0131 16:46:03.713551 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rnkxq\" (UniqueName: \"kubernetes.io/projected/4467a7c3-e52f-4518-93b4-86d4e355dd29-kube-api-access-rnkxq\") pod \"barbican-a94c-account-create-update-xwnsf\" (UID: \"4467a7c3-e52f-4518-93b4-86d4e355dd29\") " pod="swift-kuttl-tests/barbican-a94c-account-create-update-xwnsf" Jan 31 16:46:03 crc kubenswrapper[4769]: I0131 16:46:03.713762 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6k5nk\" (UniqueName: \"kubernetes.io/projected/958897d4-0af2-49e6-b39e-2a0111da08fa-kube-api-access-6k5nk\") pod \"barbican-db-create-8hxbg\" (UID: \"958897d4-0af2-49e6-b39e-2a0111da08fa\") " pod="swift-kuttl-tests/barbican-db-create-8hxbg" Jan 31 16:46:03 crc kubenswrapper[4769]: I0131 16:46:03.713885 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4467a7c3-e52f-4518-93b4-86d4e355dd29-operator-scripts\") pod \"barbican-a94c-account-create-update-xwnsf\" (UID: \"4467a7c3-e52f-4518-93b4-86d4e355dd29\") " pod="swift-kuttl-tests/barbican-a94c-account-create-update-xwnsf" Jan 31 16:46:03 crc kubenswrapper[4769]: I0131 16:46:03.713950 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/958897d4-0af2-49e6-b39e-2a0111da08fa-operator-scripts\") pod \"barbican-db-create-8hxbg\" (UID: \"958897d4-0af2-49e6-b39e-2a0111da08fa\") " pod="swift-kuttl-tests/barbican-db-create-8hxbg" Jan 31 16:46:03 crc kubenswrapper[4769]: I0131 16:46:03.815533 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4467a7c3-e52f-4518-93b4-86d4e355dd29-operator-scripts\") pod \"barbican-a94c-account-create-update-xwnsf\" (UID: \"4467a7c3-e52f-4518-93b4-86d4e355dd29\") " pod="swift-kuttl-tests/barbican-a94c-account-create-update-xwnsf" Jan 31 16:46:03 crc kubenswrapper[4769]: I0131 16:46:03.815591 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/958897d4-0af2-49e6-b39e-2a0111da08fa-operator-scripts\") pod \"barbican-db-create-8hxbg\" (UID: \"958897d4-0af2-49e6-b39e-2a0111da08fa\") " pod="swift-kuttl-tests/barbican-db-create-8hxbg" Jan 31 16:46:03 crc kubenswrapper[4769]: I0131 16:46:03.815630 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rnkxq\" (UniqueName: \"kubernetes.io/projected/4467a7c3-e52f-4518-93b4-86d4e355dd29-kube-api-access-rnkxq\") pod \"barbican-a94c-account-create-update-xwnsf\" (UID: \"4467a7c3-e52f-4518-93b4-86d4e355dd29\") " pod="swift-kuttl-tests/barbican-a94c-account-create-update-xwnsf" Jan 31 16:46:03 crc kubenswrapper[4769]: I0131 16:46:03.815720 4769 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"kube-api-access-6k5nk\" (UniqueName: \"kubernetes.io/projected/958897d4-0af2-49e6-b39e-2a0111da08fa-kube-api-access-6k5nk\") pod \"barbican-db-create-8hxbg\" (UID: \"958897d4-0af2-49e6-b39e-2a0111da08fa\") " pod="swift-kuttl-tests/barbican-db-create-8hxbg" Jan 31 16:46:03 crc kubenswrapper[4769]: I0131 16:46:03.816635 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/958897d4-0af2-49e6-b39e-2a0111da08fa-operator-scripts\") pod \"barbican-db-create-8hxbg\" (UID: \"958897d4-0af2-49e6-b39e-2a0111da08fa\") " pod="swift-kuttl-tests/barbican-db-create-8hxbg" Jan 31 16:46:03 crc kubenswrapper[4769]: I0131 16:46:03.816673 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4467a7c3-e52f-4518-93b4-86d4e355dd29-operator-scripts\") pod \"barbican-a94c-account-create-update-xwnsf\" (UID: \"4467a7c3-e52f-4518-93b4-86d4e355dd29\") " pod="swift-kuttl-tests/barbican-a94c-account-create-update-xwnsf" Jan 31 16:46:03 crc kubenswrapper[4769]: I0131 16:46:03.835735 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rnkxq\" (UniqueName: \"kubernetes.io/projected/4467a7c3-e52f-4518-93b4-86d4e355dd29-kube-api-access-rnkxq\") pod \"barbican-a94c-account-create-update-xwnsf\" (UID: \"4467a7c3-e52f-4518-93b4-86d4e355dd29\") " pod="swift-kuttl-tests/barbican-a94c-account-create-update-xwnsf" Jan 31 16:46:03 crc kubenswrapper[4769]: I0131 16:46:03.838075 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6k5nk\" (UniqueName: \"kubernetes.io/projected/958897d4-0af2-49e6-b39e-2a0111da08fa-kube-api-access-6k5nk\") pod \"barbican-db-create-8hxbg\" (UID: \"958897d4-0af2-49e6-b39e-2a0111da08fa\") " pod="swift-kuttl-tests/barbican-db-create-8hxbg" Jan 31 16:46:03 crc kubenswrapper[4769]: I0131 16:46:03.847945 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/barbican-a94c-account-create-update-xwnsf" Jan 31 16:46:03 crc kubenswrapper[4769]: I0131 16:46:03.856348 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/barbican-db-create-8hxbg" Jan 31 16:46:04 crc kubenswrapper[4769]: I0131 16:46:04.289181 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/barbican-a94c-account-create-update-xwnsf"] Jan 31 16:46:04 crc kubenswrapper[4769]: W0131 16:46:04.310915 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4467a7c3_e52f_4518_93b4_86d4e355dd29.slice/crio-1ad16ee1ff16a253faaa365a2e50f4e0b6939466a5e23b32d92c3e7208c200ae WatchSource:0}: Error finding container 1ad16ee1ff16a253faaa365a2e50f4e0b6939466a5e23b32d92c3e7208c200ae: Status 404 returned error can't find the container with id 1ad16ee1ff16a253faaa365a2e50f4e0b6939466a5e23b32d92c3e7208c200ae Jan 31 16:46:04 crc kubenswrapper[4769]: I0131 16:46:04.380635 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/barbican-db-create-8hxbg"] Jan 31 16:46:04 crc kubenswrapper[4769]: W0131 16:46:04.392532 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod958897d4_0af2_49e6_b39e_2a0111da08fa.slice/crio-4706ffbb8ed3cc33d25f7d6922d6ef9089bde32ea661da6ab3cd8b9010fa14b0 WatchSource:0}: Error finding container 4706ffbb8ed3cc33d25f7d6922d6ef9089bde32ea661da6ab3cd8b9010fa14b0: Status 404 returned error can't find the container with id 4706ffbb8ed3cc33d25f7d6922d6ef9089bde32ea661da6ab3cd8b9010fa14b0 Jan 31 16:46:04 crc kubenswrapper[4769]: I0131 16:46:04.916278 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbican-db-create-8hxbg" event={"ID":"958897d4-0af2-49e6-b39e-2a0111da08fa","Type":"ContainerStarted","Data":"4706ffbb8ed3cc33d25f7d6922d6ef9089bde32ea661da6ab3cd8b9010fa14b0"} Jan 31 16:46:04 crc kubenswrapper[4769]: I0131 16:46:04.919466 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbican-a94c-account-create-update-xwnsf" event={"ID":"4467a7c3-e52f-4518-93b4-86d4e355dd29","Type":"ContainerStarted","Data":"1ad16ee1ff16a253faaa365a2e50f4e0b6939466a5e23b32d92c3e7208c200ae"} Jan 31 16:46:06 crc kubenswrapper[4769]: I0131 16:46:06.319466 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-index-8j8j5"] Jan 31 16:46:06 crc kubenswrapper[4769]: I0131 16:46:06.320266 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/swift-operator-index-8j8j5" Jan 31 16:46:06 crc kubenswrapper[4769]: I0131 16:46:06.323287 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-index-dockercfg-sshxm" Jan 31 16:46:06 crc kubenswrapper[4769]: I0131 16:46:06.334412 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-index-8j8j5"] Jan 31 16:46:06 crc kubenswrapper[4769]: I0131 16:46:06.462185 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rdgxg\" (UniqueName: \"kubernetes.io/projected/496e46ab-9926-47d6-9c4e-02b40dee7ffa-kube-api-access-rdgxg\") pod \"swift-operator-index-8j8j5\" (UID: \"496e46ab-9926-47d6-9c4e-02b40dee7ffa\") " pod="openstack-operators/swift-operator-index-8j8j5" Jan 31 16:46:06 crc kubenswrapper[4769]: I0131 16:46:06.564125 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdgxg\" (UniqueName: \"kubernetes.io/projected/496e46ab-9926-47d6-9c4e-02b40dee7ffa-kube-api-access-rdgxg\") pod \"swift-operator-index-8j8j5\" (UID: \"496e46ab-9926-47d6-9c4e-02b40dee7ffa\") " pod="openstack-operators/swift-operator-index-8j8j5" Jan 31 16:46:06 crc kubenswrapper[4769]: I0131 16:46:06.584148 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdgxg\" (UniqueName: \"kubernetes.io/projected/496e46ab-9926-47d6-9c4e-02b40dee7ffa-kube-api-access-rdgxg\") pod \"swift-operator-index-8j8j5\" (UID: \"496e46ab-9926-47d6-9c4e-02b40dee7ffa\") " pod="openstack-operators/swift-operator-index-8j8j5" Jan 31 16:46:06 crc kubenswrapper[4769]: I0131 16:46:06.644063 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/swift-operator-index-8j8j5" Jan 31 16:46:07 crc kubenswrapper[4769]: I0131 16:46:07.078425 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-index-8j8j5"] Jan 31 16:46:07 crc kubenswrapper[4769]: I0131 16:46:07.945239 4769 generic.go:334] "Generic (PLEG): container finished" podID="958897d4-0af2-49e6-b39e-2a0111da08fa" containerID="4cef552941eb1f7a0ea1810009f1879b3c41600428b341a5e1b7f64716105bf4" exitCode=0 Jan 31 16:46:07 crc kubenswrapper[4769]: I0131 16:46:07.945337 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbican-db-create-8hxbg" event={"ID":"958897d4-0af2-49e6-b39e-2a0111da08fa","Type":"ContainerDied","Data":"4cef552941eb1f7a0ea1810009f1879b3c41600428b341a5e1b7f64716105bf4"} Jan 31 16:46:07 crc kubenswrapper[4769]: I0131 16:46:07.947714 4769 generic.go:334] "Generic (PLEG): container finished" podID="4467a7c3-e52f-4518-93b4-86d4e355dd29" containerID="4c1dd8f9771ca4c3f7f0837f1873760f93f714c33e5dc999e3bf26bdcec47870" exitCode=0 Jan 31 16:46:07 crc kubenswrapper[4769]: I0131 16:46:07.947784 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbican-a94c-account-create-update-xwnsf" event={"ID":"4467a7c3-e52f-4518-93b4-86d4e355dd29","Type":"ContainerDied","Data":"4c1dd8f9771ca4c3f7f0837f1873760f93f714c33e5dc999e3bf26bdcec47870"} Jan 31 16:46:07 crc kubenswrapper[4769]: I0131 16:46:07.949041 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-index-8j8j5" event={"ID":"496e46ab-9926-47d6-9c4e-02b40dee7ffa","Type":"ContainerStarted","Data":"862dcb49b6d525228446c5a3165facbcf2576fba91668b2efd7498c35428f27a"} Jan 31 16:46:09 crc kubenswrapper[4769]: I0131 16:46:09.628327 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/barbican-db-create-8hxbg" Jan 31 16:46:09 crc kubenswrapper[4769]: I0131 16:46:09.638024 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/barbican-a94c-account-create-update-xwnsf" Jan 31 16:46:09 crc kubenswrapper[4769]: I0131 16:46:09.818426 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnkxq\" (UniqueName: \"kubernetes.io/projected/4467a7c3-e52f-4518-93b4-86d4e355dd29-kube-api-access-rnkxq\") pod \"4467a7c3-e52f-4518-93b4-86d4e355dd29\" (UID: \"4467a7c3-e52f-4518-93b4-86d4e355dd29\") " Jan 31 16:46:09 crc kubenswrapper[4769]: I0131 16:46:09.818554 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6k5nk\" (UniqueName: \"kubernetes.io/projected/958897d4-0af2-49e6-b39e-2a0111da08fa-kube-api-access-6k5nk\") pod \"958897d4-0af2-49e6-b39e-2a0111da08fa\" (UID: \"958897d4-0af2-49e6-b39e-2a0111da08fa\") " Jan 31 16:46:09 crc kubenswrapper[4769]: I0131 16:46:09.818685 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4467a7c3-e52f-4518-93b4-86d4e355dd29-operator-scripts\") pod \"4467a7c3-e52f-4518-93b4-86d4e355dd29\" (UID: \"4467a7c3-e52f-4518-93b4-86d4e355dd29\") " Jan 31 16:46:09 crc kubenswrapper[4769]: I0131 16:46:09.818749 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/958897d4-0af2-49e6-b39e-2a0111da08fa-operator-scripts\") pod \"958897d4-0af2-49e6-b39e-2a0111da08fa\" (UID: \"958897d4-0af2-49e6-b39e-2a0111da08fa\") " Jan 31 16:46:09 crc kubenswrapper[4769]: I0131 16:46:09.819381 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4467a7c3-e52f-4518-93b4-86d4e355dd29-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4467a7c3-e52f-4518-93b4-86d4e355dd29" (UID: "4467a7c3-e52f-4518-93b4-86d4e355dd29"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:46:09 crc kubenswrapper[4769]: I0131 16:46:09.819472 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/958897d4-0af2-49e6-b39e-2a0111da08fa-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "958897d4-0af2-49e6-b39e-2a0111da08fa" (UID: "958897d4-0af2-49e6-b39e-2a0111da08fa"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:46:09 crc kubenswrapper[4769]: I0131 16:46:09.820094 4769 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4467a7c3-e52f-4518-93b4-86d4e355dd29-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 31 16:46:09 crc kubenswrapper[4769]: I0131 16:46:09.820123 4769 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/958897d4-0af2-49e6-b39e-2a0111da08fa-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 31 16:46:09 crc kubenswrapper[4769]: I0131 16:46:09.824880 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4467a7c3-e52f-4518-93b4-86d4e355dd29-kube-api-access-rnkxq" (OuterVolumeSpecName: "kube-api-access-rnkxq") pod "4467a7c3-e52f-4518-93b4-86d4e355dd29" (UID: "4467a7c3-e52f-4518-93b4-86d4e355dd29"). InnerVolumeSpecName "kube-api-access-rnkxq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:46:09 crc kubenswrapper[4769]: I0131 16:46:09.825431 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/958897d4-0af2-49e6-b39e-2a0111da08fa-kube-api-access-6k5nk" (OuterVolumeSpecName: "kube-api-access-6k5nk") pod "958897d4-0af2-49e6-b39e-2a0111da08fa" (UID: "958897d4-0af2-49e6-b39e-2a0111da08fa"). InnerVolumeSpecName "kube-api-access-6k5nk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:46:09 crc kubenswrapper[4769]: I0131 16:46:09.921538 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnkxq\" (UniqueName: \"kubernetes.io/projected/4467a7c3-e52f-4518-93b4-86d4e355dd29-kube-api-access-rnkxq\") on node \"crc\" DevicePath \"\"" Jan 31 16:46:09 crc kubenswrapper[4769]: I0131 16:46:09.921589 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6k5nk\" (UniqueName: \"kubernetes.io/projected/958897d4-0af2-49e6-b39e-2a0111da08fa-kube-api-access-6k5nk\") on node \"crc\" DevicePath \"\"" Jan 31 16:46:09 crc kubenswrapper[4769]: I0131 16:46:09.966797 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbican-a94c-account-create-update-xwnsf" event={"ID":"4467a7c3-e52f-4518-93b4-86d4e355dd29","Type":"ContainerDied","Data":"1ad16ee1ff16a253faaa365a2e50f4e0b6939466a5e23b32d92c3e7208c200ae"} Jan 31 16:46:09 crc kubenswrapper[4769]: I0131 16:46:09.966843 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1ad16ee1ff16a253faaa365a2e50f4e0b6939466a5e23b32d92c3e7208c200ae" Jan 31 16:46:09 crc kubenswrapper[4769]: I0131 16:46:09.966864 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/barbican-a94c-account-create-update-xwnsf" Jan 31 16:46:09 crc kubenswrapper[4769]: I0131 16:46:09.968620 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-index-8j8j5" event={"ID":"496e46ab-9926-47d6-9c4e-02b40dee7ffa","Type":"ContainerStarted","Data":"21ef7214d5efe3d4826ae2f535e3c21a017507c9129c85836ecaa08aefa2ec03"} Jan 31 16:46:09 crc kubenswrapper[4769]: I0131 16:46:09.971284 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbican-db-create-8hxbg" event={"ID":"958897d4-0af2-49e6-b39e-2a0111da08fa","Type":"ContainerDied","Data":"4706ffbb8ed3cc33d25f7d6922d6ef9089bde32ea661da6ab3cd8b9010fa14b0"} Jan 31 16:46:09 crc kubenswrapper[4769]: I0131 16:46:09.971326 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4706ffbb8ed3cc33d25f7d6922d6ef9089bde32ea661da6ab3cd8b9010fa14b0" Jan 31 16:46:09 crc kubenswrapper[4769]: I0131 16:46:09.971374 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/barbican-db-create-8hxbg" Jan 31 16:46:09 crc kubenswrapper[4769]: I0131 16:46:09.996895 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-index-8j8j5" podStartSLOduration=1.368063767 podStartE2EDuration="3.996866618s" podCreationTimestamp="2026-01-31 16:46:06 +0000 UTC" firstStartedPulling="2026-01-31 16:46:07.085223725 +0000 UTC m=+1015.159392394" lastFinishedPulling="2026-01-31 16:46:09.714026566 +0000 UTC m=+1017.788195245" observedRunningTime="2026-01-31 16:46:09.987287523 +0000 UTC m=+1018.061456262" watchObservedRunningTime="2026-01-31 16:46:09.996866618 +0000 UTC m=+1018.071035317" Jan 31 16:46:13 crc kubenswrapper[4769]: I0131 16:46:13.844041 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/barbican-db-sync-zzwxm"] Jan 31 16:46:13 crc kubenswrapper[4769]: E0131 16:46:13.845044 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="958897d4-0af2-49e6-b39e-2a0111da08fa" containerName="mariadb-database-create" Jan 31 16:46:13 crc kubenswrapper[4769]: I0131 16:46:13.845066 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="958897d4-0af2-49e6-b39e-2a0111da08fa" containerName="mariadb-database-create" Jan 31 16:46:13 crc kubenswrapper[4769]: E0131 16:46:13.845100 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4467a7c3-e52f-4518-93b4-86d4e355dd29" containerName="mariadb-account-create-update" Jan 31 16:46:13 crc kubenswrapper[4769]: I0131 16:46:13.845115 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="4467a7c3-e52f-4518-93b4-86d4e355dd29" containerName="mariadb-account-create-update" Jan 31 16:46:13 crc kubenswrapper[4769]: I0131 16:46:13.845351 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="958897d4-0af2-49e6-b39e-2a0111da08fa" containerName="mariadb-database-create" Jan 31 16:46:13 crc kubenswrapper[4769]: I0131 16:46:13.845368 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="4467a7c3-e52f-4518-93b4-86d4e355dd29" containerName="mariadb-account-create-update" Jan 31 16:46:13 crc kubenswrapper[4769]: I0131 16:46:13.846171 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/barbican-db-sync-zzwxm" Jan 31 16:46:13 crc kubenswrapper[4769]: I0131 16:46:13.848673 4769 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"barbican-config-data" Jan 31 16:46:13 crc kubenswrapper[4769]: I0131 16:46:13.849070 4769 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"barbican-barbican-dockercfg-fxf82" Jan 31 16:46:13 crc kubenswrapper[4769]: I0131 16:46:13.853602 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/barbican-db-sync-zzwxm"] Jan 31 16:46:13 crc kubenswrapper[4769]: I0131 16:46:13.982249 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7ghxs\" (UniqueName: \"kubernetes.io/projected/7dbe76af-a39e-4a34-ae20-cc96a79e6b6c-kube-api-access-7ghxs\") pod \"barbican-db-sync-zzwxm\" (UID: \"7dbe76af-a39e-4a34-ae20-cc96a79e6b6c\") " pod="swift-kuttl-tests/barbican-db-sync-zzwxm" Jan 31 16:46:13 crc kubenswrapper[4769]: I0131 16:46:13.982886 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/7dbe76af-a39e-4a34-ae20-cc96a79e6b6c-db-sync-config-data\") pod \"barbican-db-sync-zzwxm\" (UID: \"7dbe76af-a39e-4a34-ae20-cc96a79e6b6c\") " pod="swift-kuttl-tests/barbican-db-sync-zzwxm" Jan 31 16:46:14 crc kubenswrapper[4769]: I0131 16:46:14.084607 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7ghxs\" (UniqueName: \"kubernetes.io/projected/7dbe76af-a39e-4a34-ae20-cc96a79e6b6c-kube-api-access-7ghxs\") pod \"barbican-db-sync-zzwxm\" (UID: \"7dbe76af-a39e-4a34-ae20-cc96a79e6b6c\") " pod="swift-kuttl-tests/barbican-db-sync-zzwxm" Jan 31 16:46:14 crc kubenswrapper[4769]: I0131 16:46:14.084680 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/7dbe76af-a39e-4a34-ae20-cc96a79e6b6c-db-sync-config-data\") pod \"barbican-db-sync-zzwxm\" (UID: \"7dbe76af-a39e-4a34-ae20-cc96a79e6b6c\") " pod="swift-kuttl-tests/barbican-db-sync-zzwxm" Jan 31 16:46:14 crc kubenswrapper[4769]: I0131 16:46:14.092177 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/7dbe76af-a39e-4a34-ae20-cc96a79e6b6c-db-sync-config-data\") pod \"barbican-db-sync-zzwxm\" (UID: \"7dbe76af-a39e-4a34-ae20-cc96a79e6b6c\") " pod="swift-kuttl-tests/barbican-db-sync-zzwxm" Jan 31 16:46:14 crc kubenswrapper[4769]: I0131 16:46:14.104616 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7ghxs\" (UniqueName: \"kubernetes.io/projected/7dbe76af-a39e-4a34-ae20-cc96a79e6b6c-kube-api-access-7ghxs\") pod \"barbican-db-sync-zzwxm\" (UID: \"7dbe76af-a39e-4a34-ae20-cc96a79e6b6c\") " pod="swift-kuttl-tests/barbican-db-sync-zzwxm" Jan 31 16:46:14 crc kubenswrapper[4769]: I0131 16:46:14.178356 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/barbican-db-sync-zzwxm" Jan 31 16:46:14 crc kubenswrapper[4769]: I0131 16:46:14.599294 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/barbican-db-sync-zzwxm"] Jan 31 16:46:15 crc kubenswrapper[4769]: I0131 16:46:15.027033 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbican-db-sync-zzwxm" event={"ID":"7dbe76af-a39e-4a34-ae20-cc96a79e6b6c","Type":"ContainerStarted","Data":"de4f9abaafc77ad67a8849b411e6df1c3872b8b7a5222b7a1a521c7bc913807e"} Jan 31 16:46:16 crc kubenswrapper[4769]: I0131 16:46:16.644379 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-index-8j8j5" Jan 31 16:46:16 crc kubenswrapper[4769]: I0131 16:46:16.644428 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/swift-operator-index-8j8j5" Jan 31 16:46:16 crc kubenswrapper[4769]: I0131 16:46:16.688629 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/swift-operator-index-8j8j5" Jan 31 16:46:17 crc kubenswrapper[4769]: I0131 16:46:17.080587 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-index-8j8j5" Jan 31 16:46:18 crc kubenswrapper[4769]: I0131 16:46:18.673703 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/8b16d134801811491f87cb7f2092ac95279608e09f110b7bb4e1bb9281p2hdz"] Jan 31 16:46:18 crc kubenswrapper[4769]: I0131 16:46:18.675379 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/8b16d134801811491f87cb7f2092ac95279608e09f110b7bb4e1bb9281p2hdz"] Jan 31 16:46:18 crc kubenswrapper[4769]: I0131 16:46:18.675747 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/8b16d134801811491f87cb7f2092ac95279608e09f110b7bb4e1bb9281p2hdz" Jan 31 16:46:18 crc kubenswrapper[4769]: I0131 16:46:18.679392 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-x6tzc" Jan 31 16:46:18 crc kubenswrapper[4769]: I0131 16:46:18.863227 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lxjhw\" (UniqueName: \"kubernetes.io/projected/2fde53db-e7ae-4954-a4e8-2c3c28312031-kube-api-access-lxjhw\") pod \"8b16d134801811491f87cb7f2092ac95279608e09f110b7bb4e1bb9281p2hdz\" (UID: \"2fde53db-e7ae-4954-a4e8-2c3c28312031\") " pod="openstack-operators/8b16d134801811491f87cb7f2092ac95279608e09f110b7bb4e1bb9281p2hdz" Jan 31 16:46:18 crc kubenswrapper[4769]: I0131 16:46:18.863333 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2fde53db-e7ae-4954-a4e8-2c3c28312031-util\") pod \"8b16d134801811491f87cb7f2092ac95279608e09f110b7bb4e1bb9281p2hdz\" (UID: \"2fde53db-e7ae-4954-a4e8-2c3c28312031\") " pod="openstack-operators/8b16d134801811491f87cb7f2092ac95279608e09f110b7bb4e1bb9281p2hdz" Jan 31 16:46:18 crc kubenswrapper[4769]: I0131 16:46:18.863575 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2fde53db-e7ae-4954-a4e8-2c3c28312031-bundle\") pod \"8b16d134801811491f87cb7f2092ac95279608e09f110b7bb4e1bb9281p2hdz\" (UID: \"2fde53db-e7ae-4954-a4e8-2c3c28312031\") " pod="openstack-operators/8b16d134801811491f87cb7f2092ac95279608e09f110b7bb4e1bb9281p2hdz" Jan 31 16:46:18 crc kubenswrapper[4769]: I0131 16:46:18.964988 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2fde53db-e7ae-4954-a4e8-2c3c28312031-bundle\") pod \"8b16d134801811491f87cb7f2092ac95279608e09f110b7bb4e1bb9281p2hdz\" (UID: \"2fde53db-e7ae-4954-a4e8-2c3c28312031\") " pod="openstack-operators/8b16d134801811491f87cb7f2092ac95279608e09f110b7bb4e1bb9281p2hdz" Jan 31 16:46:18 crc kubenswrapper[4769]: I0131 16:46:18.965082 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lxjhw\" (UniqueName: \"kubernetes.io/projected/2fde53db-e7ae-4954-a4e8-2c3c28312031-kube-api-access-lxjhw\") pod \"8b16d134801811491f87cb7f2092ac95279608e09f110b7bb4e1bb9281p2hdz\" (UID: \"2fde53db-e7ae-4954-a4e8-2c3c28312031\") " pod="openstack-operators/8b16d134801811491f87cb7f2092ac95279608e09f110b7bb4e1bb9281p2hdz" Jan 31 16:46:18 crc kubenswrapper[4769]: I0131 16:46:18.965136 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2fde53db-e7ae-4954-a4e8-2c3c28312031-util\") pod \"8b16d134801811491f87cb7f2092ac95279608e09f110b7bb4e1bb9281p2hdz\" (UID: \"2fde53db-e7ae-4954-a4e8-2c3c28312031\") " pod="openstack-operators/8b16d134801811491f87cb7f2092ac95279608e09f110b7bb4e1bb9281p2hdz" Jan 31 16:46:18 crc kubenswrapper[4769]: I0131 16:46:18.965820 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2fde53db-e7ae-4954-a4e8-2c3c28312031-util\") pod \"8b16d134801811491f87cb7f2092ac95279608e09f110b7bb4e1bb9281p2hdz\" (UID: \"2fde53db-e7ae-4954-a4e8-2c3c28312031\") " 
pod="openstack-operators/8b16d134801811491f87cb7f2092ac95279608e09f110b7bb4e1bb9281p2hdz" Jan 31 16:46:18 crc kubenswrapper[4769]: I0131 16:46:18.965848 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2fde53db-e7ae-4954-a4e8-2c3c28312031-bundle\") pod \"8b16d134801811491f87cb7f2092ac95279608e09f110b7bb4e1bb9281p2hdz\" (UID: \"2fde53db-e7ae-4954-a4e8-2c3c28312031\") " pod="openstack-operators/8b16d134801811491f87cb7f2092ac95279608e09f110b7bb4e1bb9281p2hdz" Jan 31 16:46:19 crc kubenswrapper[4769]: I0131 16:46:19.006708 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lxjhw\" (UniqueName: \"kubernetes.io/projected/2fde53db-e7ae-4954-a4e8-2c3c28312031-kube-api-access-lxjhw\") pod \"8b16d134801811491f87cb7f2092ac95279608e09f110b7bb4e1bb9281p2hdz\" (UID: \"2fde53db-e7ae-4954-a4e8-2c3c28312031\") " pod="openstack-operators/8b16d134801811491f87cb7f2092ac95279608e09f110b7bb4e1bb9281p2hdz" Jan 31 16:46:19 crc kubenswrapper[4769]: I0131 16:46:19.050120 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/8b16d134801811491f87cb7f2092ac95279608e09f110b7bb4e1bb9281p2hdz" Jan 31 16:46:19 crc kubenswrapper[4769]: I0131 16:46:19.065885 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbican-db-sync-zzwxm" event={"ID":"7dbe76af-a39e-4a34-ae20-cc96a79e6b6c","Type":"ContainerStarted","Data":"82cc3e7a861d4ce60a4581b309f3ede7f9b27cba287488a722489aa32313ca47"} Jan 31 16:46:19 crc kubenswrapper[4769]: I0131 16:46:19.093615 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/barbican-db-sync-zzwxm" podStartSLOduration=2.083660783 podStartE2EDuration="6.093588082s" podCreationTimestamp="2026-01-31 16:46:13 +0000 UTC" firstStartedPulling="2026-01-31 16:46:14.608516063 +0000 UTC m=+1022.682684742" lastFinishedPulling="2026-01-31 16:46:18.618443362 +0000 UTC m=+1026.692612041" observedRunningTime="2026-01-31 16:46:19.085678331 +0000 UTC m=+1027.159847080" watchObservedRunningTime="2026-01-31 16:46:19.093588082 +0000 UTC m=+1027.167756761" Jan 31 16:46:19 crc kubenswrapper[4769]: I0131 16:46:19.491458 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/8b16d134801811491f87cb7f2092ac95279608e09f110b7bb4e1bb9281p2hdz"] Jan 31 16:46:20 crc kubenswrapper[4769]: I0131 16:46:20.075324 4769 generic.go:334] "Generic (PLEG): container finished" podID="2fde53db-e7ae-4954-a4e8-2c3c28312031" containerID="24785a8611d7a2dfc552f0adcfcb0c1d897beee1bb466ed1244fcae38c1c6ae1" exitCode=0 Jan 31 16:46:20 crc kubenswrapper[4769]: I0131 16:46:20.075430 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/8b16d134801811491f87cb7f2092ac95279608e09f110b7bb4e1bb9281p2hdz" event={"ID":"2fde53db-e7ae-4954-a4e8-2c3c28312031","Type":"ContainerDied","Data":"24785a8611d7a2dfc552f0adcfcb0c1d897beee1bb466ed1244fcae38c1c6ae1"} Jan 31 16:46:20 crc kubenswrapper[4769]: I0131 16:46:20.075913 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/8b16d134801811491f87cb7f2092ac95279608e09f110b7bb4e1bb9281p2hdz" event={"ID":"2fde53db-e7ae-4954-a4e8-2c3c28312031","Type":"ContainerStarted","Data":"251bdd539ababcea7789433b4c8403500fbfd77b03f4d976236d1f1886263540"} Jan 31 16:46:21 crc kubenswrapper[4769]: I0131 16:46:21.090045 4769 generic.go:334] "Generic (PLEG): container finished" podID="2fde53db-e7ae-4954-a4e8-2c3c28312031" 
containerID="0dc0a98b8a75bbce04667ac4f1a0979abfd218855d4ea351a7ec8255ea48f984" exitCode=0 Jan 31 16:46:21 crc kubenswrapper[4769]: I0131 16:46:21.090094 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/8b16d134801811491f87cb7f2092ac95279608e09f110b7bb4e1bb9281p2hdz" event={"ID":"2fde53db-e7ae-4954-a4e8-2c3c28312031","Type":"ContainerDied","Data":"0dc0a98b8a75bbce04667ac4f1a0979abfd218855d4ea351a7ec8255ea48f984"} Jan 31 16:46:22 crc kubenswrapper[4769]: I0131 16:46:22.100449 4769 generic.go:334] "Generic (PLEG): container finished" podID="2fde53db-e7ae-4954-a4e8-2c3c28312031" containerID="c647d8dad435a7411ab01dc6385af400fc29247c1d638cc4ed43a7fbca7a6cc9" exitCode=0 Jan 31 16:46:22 crc kubenswrapper[4769]: I0131 16:46:22.100575 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/8b16d134801811491f87cb7f2092ac95279608e09f110b7bb4e1bb9281p2hdz" event={"ID":"2fde53db-e7ae-4954-a4e8-2c3c28312031","Type":"ContainerDied","Data":"c647d8dad435a7411ab01dc6385af400fc29247c1d638cc4ed43a7fbca7a6cc9"} Jan 31 16:46:22 crc kubenswrapper[4769]: I0131 16:46:22.102239 4769 generic.go:334] "Generic (PLEG): container finished" podID="7dbe76af-a39e-4a34-ae20-cc96a79e6b6c" containerID="82cc3e7a861d4ce60a4581b309f3ede7f9b27cba287488a722489aa32313ca47" exitCode=0 Jan 31 16:46:22 crc kubenswrapper[4769]: I0131 16:46:22.102280 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbican-db-sync-zzwxm" event={"ID":"7dbe76af-a39e-4a34-ae20-cc96a79e6b6c","Type":"ContainerDied","Data":"82cc3e7a861d4ce60a4581b309f3ede7f9b27cba287488a722489aa32313ca47"} Jan 31 16:46:23 crc kubenswrapper[4769]: I0131 16:46:23.369993 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/barbican-db-sync-zzwxm" Jan 31 16:46:23 crc kubenswrapper[4769]: I0131 16:46:23.438925 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/7dbe76af-a39e-4a34-ae20-cc96a79e6b6c-db-sync-config-data\") pod \"7dbe76af-a39e-4a34-ae20-cc96a79e6b6c\" (UID: \"7dbe76af-a39e-4a34-ae20-cc96a79e6b6c\") " Jan 31 16:46:23 crc kubenswrapper[4769]: I0131 16:46:23.438988 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7ghxs\" (UniqueName: \"kubernetes.io/projected/7dbe76af-a39e-4a34-ae20-cc96a79e6b6c-kube-api-access-7ghxs\") pod \"7dbe76af-a39e-4a34-ae20-cc96a79e6b6c\" (UID: \"7dbe76af-a39e-4a34-ae20-cc96a79e6b6c\") " Jan 31 16:46:23 crc kubenswrapper[4769]: I0131 16:46:23.444293 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7dbe76af-a39e-4a34-ae20-cc96a79e6b6c-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "7dbe76af-a39e-4a34-ae20-cc96a79e6b6c" (UID: "7dbe76af-a39e-4a34-ae20-cc96a79e6b6c"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:46:23 crc kubenswrapper[4769]: I0131 16:46:23.445011 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7dbe76af-a39e-4a34-ae20-cc96a79e6b6c-kube-api-access-7ghxs" (OuterVolumeSpecName: "kube-api-access-7ghxs") pod "7dbe76af-a39e-4a34-ae20-cc96a79e6b6c" (UID: "7dbe76af-a39e-4a34-ae20-cc96a79e6b6c"). InnerVolumeSpecName "kube-api-access-7ghxs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:46:23 crc kubenswrapper[4769]: I0131 16:46:23.503601 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/8b16d134801811491f87cb7f2092ac95279608e09f110b7bb4e1bb9281p2hdz" Jan 31 16:46:23 crc kubenswrapper[4769]: I0131 16:46:23.540123 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2fde53db-e7ae-4954-a4e8-2c3c28312031-util\") pod \"2fde53db-e7ae-4954-a4e8-2c3c28312031\" (UID: \"2fde53db-e7ae-4954-a4e8-2c3c28312031\") " Jan 31 16:46:23 crc kubenswrapper[4769]: I0131 16:46:23.540211 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lxjhw\" (UniqueName: \"kubernetes.io/projected/2fde53db-e7ae-4954-a4e8-2c3c28312031-kube-api-access-lxjhw\") pod \"2fde53db-e7ae-4954-a4e8-2c3c28312031\" (UID: \"2fde53db-e7ae-4954-a4e8-2c3c28312031\") " Jan 31 16:46:23 crc kubenswrapper[4769]: I0131 16:46:23.540269 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2fde53db-e7ae-4954-a4e8-2c3c28312031-bundle\") pod \"2fde53db-e7ae-4954-a4e8-2c3c28312031\" (UID: \"2fde53db-e7ae-4954-a4e8-2c3c28312031\") " Jan 31 16:46:23 crc kubenswrapper[4769]: I0131 16:46:23.540517 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7ghxs\" (UniqueName: \"kubernetes.io/projected/7dbe76af-a39e-4a34-ae20-cc96a79e6b6c-kube-api-access-7ghxs\") on node \"crc\" DevicePath \"\"" Jan 31 16:46:23 crc kubenswrapper[4769]: I0131 16:46:23.540548 4769 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/7dbe76af-a39e-4a34-ae20-cc96a79e6b6c-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 31 16:46:23 crc kubenswrapper[4769]: I0131 16:46:23.541661 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2fde53db-e7ae-4954-a4e8-2c3c28312031-bundle" (OuterVolumeSpecName: "bundle") pod "2fde53db-e7ae-4954-a4e8-2c3c28312031" (UID: "2fde53db-e7ae-4954-a4e8-2c3c28312031"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:46:23 crc kubenswrapper[4769]: I0131 16:46:23.543955 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2fde53db-e7ae-4954-a4e8-2c3c28312031-kube-api-access-lxjhw" (OuterVolumeSpecName: "kube-api-access-lxjhw") pod "2fde53db-e7ae-4954-a4e8-2c3c28312031" (UID: "2fde53db-e7ae-4954-a4e8-2c3c28312031"). InnerVolumeSpecName "kube-api-access-lxjhw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:46:23 crc kubenswrapper[4769]: I0131 16:46:23.562988 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2fde53db-e7ae-4954-a4e8-2c3c28312031-util" (OuterVolumeSpecName: "util") pod "2fde53db-e7ae-4954-a4e8-2c3c28312031" (UID: "2fde53db-e7ae-4954-a4e8-2c3c28312031"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:46:23 crc kubenswrapper[4769]: I0131 16:46:23.641796 4769 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2fde53db-e7ae-4954-a4e8-2c3c28312031-bundle\") on node \"crc\" DevicePath \"\"" Jan 31 16:46:23 crc kubenswrapper[4769]: I0131 16:46:23.642189 4769 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2fde53db-e7ae-4954-a4e8-2c3c28312031-util\") on node \"crc\" DevicePath \"\"" Jan 31 16:46:23 crc kubenswrapper[4769]: I0131 16:46:23.642206 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lxjhw\" (UniqueName: \"kubernetes.io/projected/2fde53db-e7ae-4954-a4e8-2c3c28312031-kube-api-access-lxjhw\") on node \"crc\" DevicePath \"\"" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.118559 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbican-db-sync-zzwxm" event={"ID":"7dbe76af-a39e-4a34-ae20-cc96a79e6b6c","Type":"ContainerDied","Data":"de4f9abaafc77ad67a8849b411e6df1c3872b8b7a5222b7a1a521c7bc913807e"} Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.118586 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/barbican-db-sync-zzwxm" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.118608 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="de4f9abaafc77ad67a8849b411e6df1c3872b8b7a5222b7a1a521c7bc913807e" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.121428 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/8b16d134801811491f87cb7f2092ac95279608e09f110b7bb4e1bb9281p2hdz" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.121322 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/8b16d134801811491f87cb7f2092ac95279608e09f110b7bb4e1bb9281p2hdz" event={"ID":"2fde53db-e7ae-4954-a4e8-2c3c28312031","Type":"ContainerDied","Data":"251bdd539ababcea7789433b4c8403500fbfd77b03f4d976236d1f1886263540"} Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.124611 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="251bdd539ababcea7789433b4c8403500fbfd77b03f4d976236d1f1886263540" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.372922 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/barbican-worker-7dddbdf7d9-wrhnh"] Jan 31 16:46:24 crc kubenswrapper[4769]: E0131 16:46:24.373203 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fde53db-e7ae-4954-a4e8-2c3c28312031" containerName="pull" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.373216 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fde53db-e7ae-4954-a4e8-2c3c28312031" containerName="pull" Jan 31 16:46:24 crc kubenswrapper[4769]: E0131 16:46:24.373229 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fde53db-e7ae-4954-a4e8-2c3c28312031" containerName="extract" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.373235 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fde53db-e7ae-4954-a4e8-2c3c28312031" containerName="extract" Jan 31 16:46:24 crc kubenswrapper[4769]: E0131 16:46:24.373243 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7dbe76af-a39e-4a34-ae20-cc96a79e6b6c" containerName="barbican-db-sync" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 
16:46:24.373249 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="7dbe76af-a39e-4a34-ae20-cc96a79e6b6c" containerName="barbican-db-sync" Jan 31 16:46:24 crc kubenswrapper[4769]: E0131 16:46:24.373269 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fde53db-e7ae-4954-a4e8-2c3c28312031" containerName="util" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.373275 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fde53db-e7ae-4954-a4e8-2c3c28312031" containerName="util" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.373374 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="7dbe76af-a39e-4a34-ae20-cc96a79e6b6c" containerName="barbican-db-sync" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.373388 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fde53db-e7ae-4954-a4e8-2c3c28312031" containerName="extract" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.374104 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/barbican-worker-7dddbdf7d9-wrhnh" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.376112 4769 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"barbican-worker-config-data" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.376529 4769 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"barbican-barbican-dockercfg-fxf82" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.376957 4769 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"barbican-config-data" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.399367 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/barbican-worker-7dddbdf7d9-wrhnh"] Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.405046 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/barbican-keystone-listener-645f4bcb9b-jctzj"] Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.405987 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/barbican-keystone-listener-645f4bcb9b-jctzj" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.409729 4769 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"barbican-keystone-listener-config-data" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.434582 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/barbican-keystone-listener-645f4bcb9b-jctzj"] Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.453957 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/04fee216-053a-4105-b59f-edca6bd15bdb-logs\") pod \"barbican-keystone-listener-645f4bcb9b-jctzj\" (UID: \"04fee216-053a-4105-b59f-edca6bd15bdb\") " pod="swift-kuttl-tests/barbican-keystone-listener-645f4bcb9b-jctzj" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.454008 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ef94300f-9ac8-484f-8462-00ad88314f4a-logs\") pod \"barbican-worker-7dddbdf7d9-wrhnh\" (UID: \"ef94300f-9ac8-484f-8462-00ad88314f4a\") " pod="swift-kuttl-tests/barbican-worker-7dddbdf7d9-wrhnh" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.454144 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04fee216-053a-4105-b59f-edca6bd15bdb-config-data\") pod \"barbican-keystone-listener-645f4bcb9b-jctzj\" (UID: \"04fee216-053a-4105-b59f-edca6bd15bdb\") " pod="swift-kuttl-tests/barbican-keystone-listener-645f4bcb9b-jctzj" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.454190 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ef94300f-9ac8-484f-8462-00ad88314f4a-config-data-custom\") pod \"barbican-worker-7dddbdf7d9-wrhnh\" (UID: \"ef94300f-9ac8-484f-8462-00ad88314f4a\") " pod="swift-kuttl-tests/barbican-worker-7dddbdf7d9-wrhnh" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.454223 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wx4ss\" (UniqueName: \"kubernetes.io/projected/04fee216-053a-4105-b59f-edca6bd15bdb-kube-api-access-wx4ss\") pod \"barbican-keystone-listener-645f4bcb9b-jctzj\" (UID: \"04fee216-053a-4105-b59f-edca6bd15bdb\") " pod="swift-kuttl-tests/barbican-keystone-listener-645f4bcb9b-jctzj" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.454256 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qkjws\" (UniqueName: \"kubernetes.io/projected/ef94300f-9ac8-484f-8462-00ad88314f4a-kube-api-access-qkjws\") pod \"barbican-worker-7dddbdf7d9-wrhnh\" (UID: \"ef94300f-9ac8-484f-8462-00ad88314f4a\") " pod="swift-kuttl-tests/barbican-worker-7dddbdf7d9-wrhnh" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.454321 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ef94300f-9ac8-484f-8462-00ad88314f4a-config-data\") pod \"barbican-worker-7dddbdf7d9-wrhnh\" (UID: \"ef94300f-9ac8-484f-8462-00ad88314f4a\") " pod="swift-kuttl-tests/barbican-worker-7dddbdf7d9-wrhnh" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.454350 4769 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/04fee216-053a-4105-b59f-edca6bd15bdb-config-data-custom\") pod \"barbican-keystone-listener-645f4bcb9b-jctzj\" (UID: \"04fee216-053a-4105-b59f-edca6bd15bdb\") " pod="swift-kuttl-tests/barbican-keystone-listener-645f4bcb9b-jctzj" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.540122 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/barbican-api-5657759ccd-46xb5"] Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.541240 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/barbican-api-5657759ccd-46xb5" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.543413 4769 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"barbican-api-config-data" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.551021 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/barbican-api-5657759ccd-46xb5"] Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.555273 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ee526a1-93ff-433e-85e8-ec37678faa35-config-data\") pod \"barbican-api-5657759ccd-46xb5\" (UID: \"9ee526a1-93ff-433e-85e8-ec37678faa35\") " pod="swift-kuttl-tests/barbican-api-5657759ccd-46xb5" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.555315 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/04fee216-053a-4105-b59f-edca6bd15bdb-logs\") pod \"barbican-keystone-listener-645f4bcb9b-jctzj\" (UID: \"04fee216-053a-4105-b59f-edca6bd15bdb\") " pod="swift-kuttl-tests/barbican-keystone-listener-645f4bcb9b-jctzj" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.555335 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ef94300f-9ac8-484f-8462-00ad88314f4a-logs\") pod \"barbican-worker-7dddbdf7d9-wrhnh\" (UID: \"ef94300f-9ac8-484f-8462-00ad88314f4a\") " pod="swift-kuttl-tests/barbican-worker-7dddbdf7d9-wrhnh" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.555410 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9ee526a1-93ff-433e-85e8-ec37678faa35-config-data-custom\") pod \"barbican-api-5657759ccd-46xb5\" (UID: \"9ee526a1-93ff-433e-85e8-ec37678faa35\") " pod="swift-kuttl-tests/barbican-api-5657759ccd-46xb5" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.555526 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04fee216-053a-4105-b59f-edca6bd15bdb-config-data\") pod \"barbican-keystone-listener-645f4bcb9b-jctzj\" (UID: \"04fee216-053a-4105-b59f-edca6bd15bdb\") " pod="swift-kuttl-tests/barbican-keystone-listener-645f4bcb9b-jctzj" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.555577 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ef94300f-9ac8-484f-8462-00ad88314f4a-config-data-custom\") pod \"barbican-worker-7dddbdf7d9-wrhnh\" (UID: \"ef94300f-9ac8-484f-8462-00ad88314f4a\") " pod="swift-kuttl-tests/barbican-worker-7dddbdf7d9-wrhnh" Jan 31 
16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.555632 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wx4ss\" (UniqueName: \"kubernetes.io/projected/04fee216-053a-4105-b59f-edca6bd15bdb-kube-api-access-wx4ss\") pod \"barbican-keystone-listener-645f4bcb9b-jctzj\" (UID: \"04fee216-053a-4105-b59f-edca6bd15bdb\") " pod="swift-kuttl-tests/barbican-keystone-listener-645f4bcb9b-jctzj" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.555660 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9ee526a1-93ff-433e-85e8-ec37678faa35-logs\") pod \"barbican-api-5657759ccd-46xb5\" (UID: \"9ee526a1-93ff-433e-85e8-ec37678faa35\") " pod="swift-kuttl-tests/barbican-api-5657759ccd-46xb5" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.555694 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tgttz\" (UniqueName: \"kubernetes.io/projected/9ee526a1-93ff-433e-85e8-ec37678faa35-kube-api-access-tgttz\") pod \"barbican-api-5657759ccd-46xb5\" (UID: \"9ee526a1-93ff-433e-85e8-ec37678faa35\") " pod="swift-kuttl-tests/barbican-api-5657759ccd-46xb5" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.555720 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qkjws\" (UniqueName: \"kubernetes.io/projected/ef94300f-9ac8-484f-8462-00ad88314f4a-kube-api-access-qkjws\") pod \"barbican-worker-7dddbdf7d9-wrhnh\" (UID: \"ef94300f-9ac8-484f-8462-00ad88314f4a\") " pod="swift-kuttl-tests/barbican-worker-7dddbdf7d9-wrhnh" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.555742 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ef94300f-9ac8-484f-8462-00ad88314f4a-config-data\") pod \"barbican-worker-7dddbdf7d9-wrhnh\" (UID: \"ef94300f-9ac8-484f-8462-00ad88314f4a\") " pod="swift-kuttl-tests/barbican-worker-7dddbdf7d9-wrhnh" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.555756 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/04fee216-053a-4105-b59f-edca6bd15bdb-config-data-custom\") pod \"barbican-keystone-listener-645f4bcb9b-jctzj\" (UID: \"04fee216-053a-4105-b59f-edca6bd15bdb\") " pod="swift-kuttl-tests/barbican-keystone-listener-645f4bcb9b-jctzj" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.556234 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/04fee216-053a-4105-b59f-edca6bd15bdb-logs\") pod \"barbican-keystone-listener-645f4bcb9b-jctzj\" (UID: \"04fee216-053a-4105-b59f-edca6bd15bdb\") " pod="swift-kuttl-tests/barbican-keystone-listener-645f4bcb9b-jctzj" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.556868 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ef94300f-9ac8-484f-8462-00ad88314f4a-logs\") pod \"barbican-worker-7dddbdf7d9-wrhnh\" (UID: \"ef94300f-9ac8-484f-8462-00ad88314f4a\") " pod="swift-kuttl-tests/barbican-worker-7dddbdf7d9-wrhnh" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.559855 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04fee216-053a-4105-b59f-edca6bd15bdb-config-data\") pod 
\"barbican-keystone-listener-645f4bcb9b-jctzj\" (UID: \"04fee216-053a-4105-b59f-edca6bd15bdb\") " pod="swift-kuttl-tests/barbican-keystone-listener-645f4bcb9b-jctzj" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.559886 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ef94300f-9ac8-484f-8462-00ad88314f4a-config-data\") pod \"barbican-worker-7dddbdf7d9-wrhnh\" (UID: \"ef94300f-9ac8-484f-8462-00ad88314f4a\") " pod="swift-kuttl-tests/barbican-worker-7dddbdf7d9-wrhnh" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.560408 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ef94300f-9ac8-484f-8462-00ad88314f4a-config-data-custom\") pod \"barbican-worker-7dddbdf7d9-wrhnh\" (UID: \"ef94300f-9ac8-484f-8462-00ad88314f4a\") " pod="swift-kuttl-tests/barbican-worker-7dddbdf7d9-wrhnh" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.561109 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/04fee216-053a-4105-b59f-edca6bd15bdb-config-data-custom\") pod \"barbican-keystone-listener-645f4bcb9b-jctzj\" (UID: \"04fee216-053a-4105-b59f-edca6bd15bdb\") " pod="swift-kuttl-tests/barbican-keystone-listener-645f4bcb9b-jctzj" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.575022 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wx4ss\" (UniqueName: \"kubernetes.io/projected/04fee216-053a-4105-b59f-edca6bd15bdb-kube-api-access-wx4ss\") pod \"barbican-keystone-listener-645f4bcb9b-jctzj\" (UID: \"04fee216-053a-4105-b59f-edca6bd15bdb\") " pod="swift-kuttl-tests/barbican-keystone-listener-645f4bcb9b-jctzj" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.587105 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qkjws\" (UniqueName: \"kubernetes.io/projected/ef94300f-9ac8-484f-8462-00ad88314f4a-kube-api-access-qkjws\") pod \"barbican-worker-7dddbdf7d9-wrhnh\" (UID: \"ef94300f-9ac8-484f-8462-00ad88314f4a\") " pod="swift-kuttl-tests/barbican-worker-7dddbdf7d9-wrhnh" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.661053 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ee526a1-93ff-433e-85e8-ec37678faa35-config-data\") pod \"barbican-api-5657759ccd-46xb5\" (UID: \"9ee526a1-93ff-433e-85e8-ec37678faa35\") " pod="swift-kuttl-tests/barbican-api-5657759ccd-46xb5" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.661137 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9ee526a1-93ff-433e-85e8-ec37678faa35-config-data-custom\") pod \"barbican-api-5657759ccd-46xb5\" (UID: \"9ee526a1-93ff-433e-85e8-ec37678faa35\") " pod="swift-kuttl-tests/barbican-api-5657759ccd-46xb5" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.661236 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9ee526a1-93ff-433e-85e8-ec37678faa35-logs\") pod \"barbican-api-5657759ccd-46xb5\" (UID: \"9ee526a1-93ff-433e-85e8-ec37678faa35\") " pod="swift-kuttl-tests/barbican-api-5657759ccd-46xb5" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.661282 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-tgttz\" (UniqueName: \"kubernetes.io/projected/9ee526a1-93ff-433e-85e8-ec37678faa35-kube-api-access-tgttz\") pod \"barbican-api-5657759ccd-46xb5\" (UID: \"9ee526a1-93ff-433e-85e8-ec37678faa35\") " pod="swift-kuttl-tests/barbican-api-5657759ccd-46xb5" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.661698 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9ee526a1-93ff-433e-85e8-ec37678faa35-logs\") pod \"barbican-api-5657759ccd-46xb5\" (UID: \"9ee526a1-93ff-433e-85e8-ec37678faa35\") " pod="swift-kuttl-tests/barbican-api-5657759ccd-46xb5" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.665206 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ee526a1-93ff-433e-85e8-ec37678faa35-config-data\") pod \"barbican-api-5657759ccd-46xb5\" (UID: \"9ee526a1-93ff-433e-85e8-ec37678faa35\") " pod="swift-kuttl-tests/barbican-api-5657759ccd-46xb5" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.667160 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9ee526a1-93ff-433e-85e8-ec37678faa35-config-data-custom\") pod \"barbican-api-5657759ccd-46xb5\" (UID: \"9ee526a1-93ff-433e-85e8-ec37678faa35\") " pod="swift-kuttl-tests/barbican-api-5657759ccd-46xb5" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.675766 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tgttz\" (UniqueName: \"kubernetes.io/projected/9ee526a1-93ff-433e-85e8-ec37678faa35-kube-api-access-tgttz\") pod \"barbican-api-5657759ccd-46xb5\" (UID: \"9ee526a1-93ff-433e-85e8-ec37678faa35\") " pod="swift-kuttl-tests/barbican-api-5657759ccd-46xb5" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.697297 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/barbican-worker-7dddbdf7d9-wrhnh" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.723094 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/barbican-keystone-listener-645f4bcb9b-jctzj" Jan 31 16:46:24 crc kubenswrapper[4769]: I0131 16:46:24.856567 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/barbican-api-5657759ccd-46xb5" Jan 31 16:46:25 crc kubenswrapper[4769]: I0131 16:46:25.088933 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/barbican-api-5657759ccd-46xb5"] Jan 31 16:46:25 crc kubenswrapper[4769]: W0131 16:46:25.108631 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9ee526a1_93ff_433e_85e8_ec37678faa35.slice/crio-f5877b194a173ddf27cdb9fd567d76059be615a2b57ad25e5c32b284fc19f79a WatchSource:0}: Error finding container f5877b194a173ddf27cdb9fd567d76059be615a2b57ad25e5c32b284fc19f79a: Status 404 returned error can't find the container with id f5877b194a173ddf27cdb9fd567d76059be615a2b57ad25e5c32b284fc19f79a Jan 31 16:46:25 crc kubenswrapper[4769]: I0131 16:46:25.148003 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbican-api-5657759ccd-46xb5" event={"ID":"9ee526a1-93ff-433e-85e8-ec37678faa35","Type":"ContainerStarted","Data":"f5877b194a173ddf27cdb9fd567d76059be615a2b57ad25e5c32b284fc19f79a"} Jan 31 16:46:25 crc kubenswrapper[4769]: I0131 16:46:25.152825 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/barbican-worker-7dddbdf7d9-wrhnh"] Jan 31 16:46:25 crc kubenswrapper[4769]: I0131 16:46:25.175870 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/barbican-keystone-listener-645f4bcb9b-jctzj"] Jan 31 16:46:26 crc kubenswrapper[4769]: I0131 16:46:26.154558 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbican-worker-7dddbdf7d9-wrhnh" event={"ID":"ef94300f-9ac8-484f-8462-00ad88314f4a","Type":"ContainerStarted","Data":"1d76afae81c4ddfc52e0e250ffb5fd625ff299c7cdbdc9174d036af2abf599e4"} Jan 31 16:46:26 crc kubenswrapper[4769]: I0131 16:46:26.155637 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbican-keystone-listener-645f4bcb9b-jctzj" event={"ID":"04fee216-053a-4105-b59f-edca6bd15bdb","Type":"ContainerStarted","Data":"91fe07706bffc69a48719166c88d066a4fc62bd56abcd036b9bbae6dea32c6b2"} Jan 31 16:46:26 crc kubenswrapper[4769]: I0131 16:46:26.157237 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbican-api-5657759ccd-46xb5" event={"ID":"9ee526a1-93ff-433e-85e8-ec37678faa35","Type":"ContainerStarted","Data":"53661fbea45348f5a2f3f626996992198e4d8b124b579e335f18f3ba2fc2e94e"} Jan 31 16:46:26 crc kubenswrapper[4769]: I0131 16:46:26.157280 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbican-api-5657759ccd-46xb5" event={"ID":"9ee526a1-93ff-433e-85e8-ec37678faa35","Type":"ContainerStarted","Data":"7b867df41f25ac06f12a6035d0740604de7e75832d0fc19daec102540f6dabba"} Jan 31 16:46:26 crc kubenswrapper[4769]: I0131 16:46:26.157379 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/barbican-api-5657759ccd-46xb5" Jan 31 16:46:26 crc kubenswrapper[4769]: I0131 16:46:26.180593 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/barbican-api-5657759ccd-46xb5" podStartSLOduration=2.180579234 podStartE2EDuration="2.180579234s" podCreationTimestamp="2026-01-31 16:46:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 16:46:26.178194831 +0000 UTC m=+1034.252363500" watchObservedRunningTime="2026-01-31 16:46:26.180579234 +0000 UTC 
m=+1034.254747903" Jan 31 16:46:27 crc kubenswrapper[4769]: I0131 16:46:27.164221 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbican-worker-7dddbdf7d9-wrhnh" event={"ID":"ef94300f-9ac8-484f-8462-00ad88314f4a","Type":"ContainerStarted","Data":"5b61f8a41d7d32283140b3b91a097f535fc9fa4b31f84eee3c4061e6cb1843e6"} Jan 31 16:46:27 crc kubenswrapper[4769]: I0131 16:46:27.164530 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbican-worker-7dddbdf7d9-wrhnh" event={"ID":"ef94300f-9ac8-484f-8462-00ad88314f4a","Type":"ContainerStarted","Data":"cc7f8ee07f5391f5bdf7070e8e1399d792fabd42392586c10d2090cd42120dcd"} Jan 31 16:46:27 crc kubenswrapper[4769]: I0131 16:46:27.167864 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbican-keystone-listener-645f4bcb9b-jctzj" event={"ID":"04fee216-053a-4105-b59f-edca6bd15bdb","Type":"ContainerStarted","Data":"e3362c08d2c302e5291fc19bcdd128a1c96d622e6a5be79686dc41ad258950df"} Jan 31 16:46:27 crc kubenswrapper[4769]: I0131 16:46:27.167935 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/barbican-keystone-listener-645f4bcb9b-jctzj" event={"ID":"04fee216-053a-4105-b59f-edca6bd15bdb","Type":"ContainerStarted","Data":"1cf158e22c0b2f01a412fc16ee0f66323098126be35cdd454a813fcb50945365"} Jan 31 16:46:27 crc kubenswrapper[4769]: I0131 16:46:27.167964 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/barbican-api-5657759ccd-46xb5" Jan 31 16:46:27 crc kubenswrapper[4769]: I0131 16:46:27.185309 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/barbican-worker-7dddbdf7d9-wrhnh" podStartSLOduration=1.921045553 podStartE2EDuration="3.185292796s" podCreationTimestamp="2026-01-31 16:46:24 +0000 UTC" firstStartedPulling="2026-01-31 16:46:25.143997552 +0000 UTC m=+1033.218166221" lastFinishedPulling="2026-01-31 16:46:26.408244795 +0000 UTC m=+1034.482413464" observedRunningTime="2026-01-31 16:46:27.184484394 +0000 UTC m=+1035.258653063" watchObservedRunningTime="2026-01-31 16:46:27.185292796 +0000 UTC m=+1035.259461485" Jan 31 16:46:32 crc kubenswrapper[4769]: I0131 16:46:32.297853 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/barbican-api-5657759ccd-46xb5" podUID="9ee526a1-93ff-433e-85e8-ec37678faa35" containerName="barbican-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 31 16:46:33 crc kubenswrapper[4769]: I0131 16:46:33.163727 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="swift-kuttl-tests/keystone-c9ff7c785-ktbq4" Jan 31 16:46:33 crc kubenswrapper[4769]: I0131 16:46:33.184388 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/barbican-keystone-listener-645f4bcb9b-jctzj" podStartSLOduration=7.973652294 podStartE2EDuration="9.184364578s" podCreationTimestamp="2026-01-31 16:46:24 +0000 UTC" firstStartedPulling="2026-01-31 16:46:25.199313538 +0000 UTC m=+1033.273482197" lastFinishedPulling="2026-01-31 16:46:26.410025802 +0000 UTC m=+1034.484194481" observedRunningTime="2026-01-31 16:46:27.211735791 +0000 UTC m=+1035.285904460" watchObservedRunningTime="2026-01-31 16:46:33.184364578 +0000 UTC m=+1041.258533287" Jan 31 16:46:34 crc kubenswrapper[4769]: I0131 16:46:34.363394 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-7c45849b49-zv5rj"] Jan 31 16:46:34 crc 
kubenswrapper[4769]: I0131 16:46:34.364419 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-7c45849b49-zv5rj" Jan 31 16:46:34 crc kubenswrapper[4769]: I0131 16:46:34.366753 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-service-cert" Jan 31 16:46:34 crc kubenswrapper[4769]: I0131 16:46:34.366834 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-xn8db" Jan 31 16:46:34 crc kubenswrapper[4769]: I0131 16:46:34.389923 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-7c45849b49-zv5rj"] Jan 31 16:46:34 crc kubenswrapper[4769]: I0131 16:46:34.514796 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xvwgp\" (UniqueName: \"kubernetes.io/projected/6373c7e8-28b1-4687-b2f0-baab31a0ae5b-kube-api-access-xvwgp\") pod \"swift-operator-controller-manager-7c45849b49-zv5rj\" (UID: \"6373c7e8-28b1-4687-b2f0-baab31a0ae5b\") " pod="openstack-operators/swift-operator-controller-manager-7c45849b49-zv5rj" Jan 31 16:46:34 crc kubenswrapper[4769]: I0131 16:46:34.514940 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/6373c7e8-28b1-4687-b2f0-baab31a0ae5b-apiservice-cert\") pod \"swift-operator-controller-manager-7c45849b49-zv5rj\" (UID: \"6373c7e8-28b1-4687-b2f0-baab31a0ae5b\") " pod="openstack-operators/swift-operator-controller-manager-7c45849b49-zv5rj" Jan 31 16:46:34 crc kubenswrapper[4769]: I0131 16:46:34.514980 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/6373c7e8-28b1-4687-b2f0-baab31a0ae5b-webhook-cert\") pod \"swift-operator-controller-manager-7c45849b49-zv5rj\" (UID: \"6373c7e8-28b1-4687-b2f0-baab31a0ae5b\") " pod="openstack-operators/swift-operator-controller-manager-7c45849b49-zv5rj" Jan 31 16:46:34 crc kubenswrapper[4769]: I0131 16:46:34.616747 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xvwgp\" (UniqueName: \"kubernetes.io/projected/6373c7e8-28b1-4687-b2f0-baab31a0ae5b-kube-api-access-xvwgp\") pod \"swift-operator-controller-manager-7c45849b49-zv5rj\" (UID: \"6373c7e8-28b1-4687-b2f0-baab31a0ae5b\") " pod="openstack-operators/swift-operator-controller-manager-7c45849b49-zv5rj" Jan 31 16:46:34 crc kubenswrapper[4769]: I0131 16:46:34.616881 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/6373c7e8-28b1-4687-b2f0-baab31a0ae5b-apiservice-cert\") pod \"swift-operator-controller-manager-7c45849b49-zv5rj\" (UID: \"6373c7e8-28b1-4687-b2f0-baab31a0ae5b\") " pod="openstack-operators/swift-operator-controller-manager-7c45849b49-zv5rj" Jan 31 16:46:34 crc kubenswrapper[4769]: I0131 16:46:34.616920 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/6373c7e8-28b1-4687-b2f0-baab31a0ae5b-webhook-cert\") pod \"swift-operator-controller-manager-7c45849b49-zv5rj\" (UID: \"6373c7e8-28b1-4687-b2f0-baab31a0ae5b\") " pod="openstack-operators/swift-operator-controller-manager-7c45849b49-zv5rj" Jan 31 16:46:34 crc 
kubenswrapper[4769]: I0131 16:46:34.635353 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/6373c7e8-28b1-4687-b2f0-baab31a0ae5b-webhook-cert\") pod \"swift-operator-controller-manager-7c45849b49-zv5rj\" (UID: \"6373c7e8-28b1-4687-b2f0-baab31a0ae5b\") " pod="openstack-operators/swift-operator-controller-manager-7c45849b49-zv5rj" Jan 31 16:46:34 crc kubenswrapper[4769]: I0131 16:46:34.635399 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/6373c7e8-28b1-4687-b2f0-baab31a0ae5b-apiservice-cert\") pod \"swift-operator-controller-manager-7c45849b49-zv5rj\" (UID: \"6373c7e8-28b1-4687-b2f0-baab31a0ae5b\") " pod="openstack-operators/swift-operator-controller-manager-7c45849b49-zv5rj" Jan 31 16:46:34 crc kubenswrapper[4769]: I0131 16:46:34.653638 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xvwgp\" (UniqueName: \"kubernetes.io/projected/6373c7e8-28b1-4687-b2f0-baab31a0ae5b-kube-api-access-xvwgp\") pod \"swift-operator-controller-manager-7c45849b49-zv5rj\" (UID: \"6373c7e8-28b1-4687-b2f0-baab31a0ae5b\") " pod="openstack-operators/swift-operator-controller-manager-7c45849b49-zv5rj" Jan 31 16:46:34 crc kubenswrapper[4769]: I0131 16:46:34.686125 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-7c45849b49-zv5rj" Jan 31 16:46:35 crc kubenswrapper[4769]: I0131 16:46:35.276865 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-7c45849b49-zv5rj"] Jan 31 16:46:36 crc kubenswrapper[4769]: I0131 16:46:36.233538 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-7c45849b49-zv5rj" event={"ID":"6373c7e8-28b1-4687-b2f0-baab31a0ae5b","Type":"ContainerStarted","Data":"d17daf0700d99e5cb1c1e6108ad3b1978c31be5c09ab69ead21f6d8387f9c583"} Jan 31 16:46:36 crc kubenswrapper[4769]: I0131 16:46:36.433190 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="swift-kuttl-tests/barbican-api-5657759ccd-46xb5" Jan 31 16:46:36 crc kubenswrapper[4769]: I0131 16:46:36.468097 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="swift-kuttl-tests/barbican-api-5657759ccd-46xb5" Jan 31 16:46:38 crc kubenswrapper[4769]: I0131 16:46:38.246455 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-7c45849b49-zv5rj" event={"ID":"6373c7e8-28b1-4687-b2f0-baab31a0ae5b","Type":"ContainerStarted","Data":"6cefe8cd818ab7c358edce9063e6aea72c33181653d8ee69e8f664c0a6ffe5fa"} Jan 31 16:46:38 crc kubenswrapper[4769]: I0131 16:46:38.246813 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-7c45849b49-zv5rj" Jan 31 16:46:38 crc kubenswrapper[4769]: I0131 16:46:38.266173 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-7c45849b49-zv5rj" podStartSLOduration=1.583586585 podStartE2EDuration="4.266153089s" podCreationTimestamp="2026-01-31 16:46:34 +0000 UTC" firstStartedPulling="2026-01-31 16:46:35.28263905 +0000 UTC m=+1043.356807719" lastFinishedPulling="2026-01-31 16:46:37.965205534 +0000 UTC m=+1046.039374223" observedRunningTime="2026-01-31 16:46:38.259756429 +0000 UTC 
m=+1046.333925098" watchObservedRunningTime="2026-01-31 16:46:38.266153089 +0000 UTC m=+1046.340321758" Jan 31 16:46:44 crc kubenswrapper[4769]: I0131 16:46:44.694476 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-7c45849b49-zv5rj" Jan 31 16:46:55 crc kubenswrapper[4769]: I0131 16:46:55.493910 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/swift-storage-0"] Jan 31 16:46:55 crc kubenswrapper[4769]: I0131 16:46:55.503467 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-storage-0" Jan 31 16:46:55 crc kubenswrapper[4769]: I0131 16:46:55.507580 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"swift-ring-files" Jan 31 16:46:55 crc kubenswrapper[4769]: I0131 16:46:55.507905 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"swift-storage-config-data" Jan 31 16:46:55 crc kubenswrapper[4769]: I0131 16:46:55.508850 4769 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"swift-swift-dockercfg-vpzcp" Jan 31 16:46:55 crc kubenswrapper[4769]: I0131 16:46:55.508953 4769 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"swift-conf" Jan 31 16:46:55 crc kubenswrapper[4769]: I0131 16:46:55.517126 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-storage-0"] Jan 31 16:46:55 crc kubenswrapper[4769]: I0131 16:46:55.625569 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cvlwz\" (UniqueName: \"kubernetes.io/projected/2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc-kube-api-access-cvlwz\") pod \"swift-storage-0\" (UID: \"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc\") " pod="swift-kuttl-tests/swift-storage-0" Jan 31 16:46:55 crc kubenswrapper[4769]: I0131 16:46:55.625643 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc-etc-swift\") pod \"swift-storage-0\" (UID: \"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc\") " pod="swift-kuttl-tests/swift-storage-0" Jan 31 16:46:55 crc kubenswrapper[4769]: I0131 16:46:55.625680 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc-lock\") pod \"swift-storage-0\" (UID: \"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc\") " pod="swift-kuttl-tests/swift-storage-0" Jan 31 16:46:55 crc kubenswrapper[4769]: I0131 16:46:55.625718 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc-cache\") pod \"swift-storage-0\" (UID: \"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc\") " pod="swift-kuttl-tests/swift-storage-0" Jan 31 16:46:55 crc kubenswrapper[4769]: I0131 16:46:55.625740 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"swift-storage-0\" (UID: \"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc\") " pod="swift-kuttl-tests/swift-storage-0" Jan 31 16:46:55 crc kubenswrapper[4769]: I0131 16:46:55.727356 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-cvlwz\" (UniqueName: \"kubernetes.io/projected/2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc-kube-api-access-cvlwz\") pod \"swift-storage-0\" (UID: \"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc\") " pod="swift-kuttl-tests/swift-storage-0" Jan 31 16:46:55 crc kubenswrapper[4769]: I0131 16:46:55.727442 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc-etc-swift\") pod \"swift-storage-0\" (UID: \"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc\") " pod="swift-kuttl-tests/swift-storage-0" Jan 31 16:46:55 crc kubenswrapper[4769]: I0131 16:46:55.727486 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc-lock\") pod \"swift-storage-0\" (UID: \"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc\") " pod="swift-kuttl-tests/swift-storage-0" Jan 31 16:46:55 crc kubenswrapper[4769]: I0131 16:46:55.727543 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc-cache\") pod \"swift-storage-0\" (UID: \"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc\") " pod="swift-kuttl-tests/swift-storage-0" Jan 31 16:46:55 crc kubenswrapper[4769]: I0131 16:46:55.727573 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"swift-storage-0\" (UID: \"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc\") " pod="swift-kuttl-tests/swift-storage-0" Jan 31 16:46:55 crc kubenswrapper[4769]: E0131 16:46:55.727853 4769 projected.go:288] Couldn't get configMap swift-kuttl-tests/swift-ring-files: configmap "swift-ring-files" not found Jan 31 16:46:55 crc kubenswrapper[4769]: E0131 16:46:55.727911 4769 projected.go:194] Error preparing data for projected volume etc-swift for pod swift-kuttl-tests/swift-storage-0: configmap "swift-ring-files" not found Jan 31 16:46:55 crc kubenswrapper[4769]: I0131 16:46:55.728014 4769 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"swift-storage-0\" (UID: \"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc\") device mount path \"/mnt/openstack/pv11\"" pod="swift-kuttl-tests/swift-storage-0" Jan 31 16:46:55 crc kubenswrapper[4769]: E0131 16:46:55.728028 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc-etc-swift podName:2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc nodeName:}" failed. No retries permitted until 2026-01-31 16:46:56.2279909 +0000 UTC m=+1064.302159649 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc-etc-swift") pod "swift-storage-0" (UID: "2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc") : configmap "swift-ring-files" not found Jan 31 16:46:55 crc kubenswrapper[4769]: I0131 16:46:55.728155 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc-lock\") pod \"swift-storage-0\" (UID: \"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc\") " pod="swift-kuttl-tests/swift-storage-0" Jan 31 16:46:55 crc kubenswrapper[4769]: I0131 16:46:55.728382 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc-cache\") pod \"swift-storage-0\" (UID: \"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc\") " pod="swift-kuttl-tests/swift-storage-0" Jan 31 16:46:55 crc kubenswrapper[4769]: I0131 16:46:55.750048 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cvlwz\" (UniqueName: \"kubernetes.io/projected/2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc-kube-api-access-cvlwz\") pod \"swift-storage-0\" (UID: \"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc\") " pod="swift-kuttl-tests/swift-storage-0" Jan 31 16:46:55 crc kubenswrapper[4769]: I0131 16:46:55.752017 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"swift-storage-0\" (UID: \"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc\") " pod="swift-kuttl-tests/swift-storage-0" Jan 31 16:46:55 crc kubenswrapper[4769]: I0131 16:46:55.979228 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-xskfg"] Jan 31 16:46:55 crc kubenswrapper[4769]: I0131 16:46:55.980318 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-xskfg" Jan 31 16:46:55 crc kubenswrapper[4769]: I0131 16:46:55.988752 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"swift-ring-config-data" Jan 31 16:46:55 crc kubenswrapper[4769]: I0131 16:46:55.989217 4769 reflector.go:368] Caches populated for *v1.Secret from object-"swift-kuttl-tests"/"swift-proxy-config-data" Jan 31 16:46:55 crc kubenswrapper[4769]: I0131 16:46:55.989882 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"swift-ring-scripts" Jan 31 16:46:55 crc kubenswrapper[4769]: I0131 16:46:55.998190 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-xskfg"] Jan 31 16:46:55 crc kubenswrapper[4769]: E0131 16:46:55.998815 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[dispersionconf etc-swift kube-api-access-lbbm6 ring-data-devices scripts swiftconf], unattached volumes=[], failed to process volumes=[dispersionconf etc-swift kube-api-access-lbbm6 ring-data-devices scripts swiftconf]: context canceled" pod="swift-kuttl-tests/swift-ring-rebalance-xskfg" podUID="2f895234-cacd-48ae-94e3-ff57a223a496" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.027888 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-2sjs2"] Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.028828 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.036200 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-xskfg"] Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.048880 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-2sjs2"] Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.132566 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2f895234-cacd-48ae-94e3-ff57a223a496-scripts\") pod \"swift-ring-rebalance-xskfg\" (UID: \"2f895234-cacd-48ae-94e3-ff57a223a496\") " pod="swift-kuttl-tests/swift-ring-rebalance-xskfg" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.132860 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/2f895234-cacd-48ae-94e3-ff57a223a496-swiftconf\") pod \"swift-ring-rebalance-xskfg\" (UID: \"2f895234-cacd-48ae-94e3-ff57a223a496\") " pod="swift-kuttl-tests/swift-ring-rebalance-xskfg" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.133052 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/54c0116b-a027-4f11-8b6b-aa00778f1acb-dispersionconf\") pod \"swift-ring-rebalance-2sjs2\" (UID: \"54c0116b-a027-4f11-8b6b-aa00778f1acb\") " pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.133168 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mws6d\" (UniqueName: \"kubernetes.io/projected/54c0116b-a027-4f11-8b6b-aa00778f1acb-kube-api-access-mws6d\") pod \"swift-ring-rebalance-2sjs2\" (UID: \"54c0116b-a027-4f11-8b6b-aa00778f1acb\") " pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.133555 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/2f895234-cacd-48ae-94e3-ff57a223a496-dispersionconf\") pod \"swift-ring-rebalance-xskfg\" (UID: \"2f895234-cacd-48ae-94e3-ff57a223a496\") " pod="swift-kuttl-tests/swift-ring-rebalance-xskfg" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.133661 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices\") pod \"swift-ring-rebalance-2sjs2\" (UID: \"54c0116b-a027-4f11-8b6b-aa00778f1acb\") " pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.133780 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/54c0116b-a027-4f11-8b6b-aa00778f1acb-swiftconf\") pod \"swift-ring-rebalance-2sjs2\" (UID: \"54c0116b-a027-4f11-8b6b-aa00778f1acb\") " pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.133908 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/2f895234-cacd-48ae-94e3-ff57a223a496-etc-swift\") pod 
\"swift-ring-rebalance-xskfg\" (UID: \"2f895234-cacd-48ae-94e3-ff57a223a496\") " pod="swift-kuttl-tests/swift-ring-rebalance-xskfg" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.134013 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/2f895234-cacd-48ae-94e3-ff57a223a496-ring-data-devices\") pod \"swift-ring-rebalance-xskfg\" (UID: \"2f895234-cacd-48ae-94e3-ff57a223a496\") " pod="swift-kuttl-tests/swift-ring-rebalance-xskfg" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.134139 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/54c0116b-a027-4f11-8b6b-aa00778f1acb-etc-swift\") pod \"swift-ring-rebalance-2sjs2\" (UID: \"54c0116b-a027-4f11-8b6b-aa00778f1acb\") " pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.134270 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lbbm6\" (UniqueName: \"kubernetes.io/projected/2f895234-cacd-48ae-94e3-ff57a223a496-kube-api-access-lbbm6\") pod \"swift-ring-rebalance-xskfg\" (UID: \"2f895234-cacd-48ae-94e3-ff57a223a496\") " pod="swift-kuttl-tests/swift-ring-rebalance-xskfg" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.134386 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-scripts\") pod \"swift-ring-rebalance-2sjs2\" (UID: \"54c0116b-a027-4f11-8b6b-aa00778f1acb\") " pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.236132 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/54c0116b-a027-4f11-8b6b-aa00778f1acb-dispersionconf\") pod \"swift-ring-rebalance-2sjs2\" (UID: \"54c0116b-a027-4f11-8b6b-aa00778f1acb\") " pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.236533 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mws6d\" (UniqueName: \"kubernetes.io/projected/54c0116b-a027-4f11-8b6b-aa00778f1acb-kube-api-access-mws6d\") pod \"swift-ring-rebalance-2sjs2\" (UID: \"54c0116b-a027-4f11-8b6b-aa00778f1acb\") " pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.236778 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/2f895234-cacd-48ae-94e3-ff57a223a496-dispersionconf\") pod \"swift-ring-rebalance-xskfg\" (UID: \"2f895234-cacd-48ae-94e3-ff57a223a496\") " pod="swift-kuttl-tests/swift-ring-rebalance-xskfg" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.236958 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices\") pod \"swift-ring-rebalance-2sjs2\" (UID: \"54c0116b-a027-4f11-8b6b-aa00778f1acb\") " pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.237167 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: 
\"kubernetes.io/secret/54c0116b-a027-4f11-8b6b-aa00778f1acb-swiftconf\") pod \"swift-ring-rebalance-2sjs2\" (UID: \"54c0116b-a027-4f11-8b6b-aa00778f1acb\") " pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.237358 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/2f895234-cacd-48ae-94e3-ff57a223a496-etc-swift\") pod \"swift-ring-rebalance-xskfg\" (UID: \"2f895234-cacd-48ae-94e3-ff57a223a496\") " pod="swift-kuttl-tests/swift-ring-rebalance-xskfg" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.237569 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/2f895234-cacd-48ae-94e3-ff57a223a496-ring-data-devices\") pod \"swift-ring-rebalance-xskfg\" (UID: \"2f895234-cacd-48ae-94e3-ff57a223a496\") " pod="swift-kuttl-tests/swift-ring-rebalance-xskfg" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.237793 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/54c0116b-a027-4f11-8b6b-aa00778f1acb-etc-swift\") pod \"swift-ring-rebalance-2sjs2\" (UID: \"54c0116b-a027-4f11-8b6b-aa00778f1acb\") " pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.238137 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/54c0116b-a027-4f11-8b6b-aa00778f1acb-etc-swift\") pod \"swift-ring-rebalance-2sjs2\" (UID: \"54c0116b-a027-4f11-8b6b-aa00778f1acb\") " pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 16:46:56 crc kubenswrapper[4769]: E0131 16:46:56.237070 4769 configmap.go:193] Couldn't get configMap swift-kuttl-tests/swift-ring-config-data: configmap "swift-ring-config-data" not found Jan 31 16:46:56 crc kubenswrapper[4769]: E0131 16:46:56.238233 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices podName:54c0116b-a027-4f11-8b6b-aa00778f1acb nodeName:}" failed. No retries permitted until 2026-01-31 16:46:56.738212425 +0000 UTC m=+1064.812381104 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices") pod "swift-ring-rebalance-2sjs2" (UID: "54c0116b-a027-4f11-8b6b-aa00778f1acb") : configmap "swift-ring-config-data" not found Jan 31 16:46:56 crc kubenswrapper[4769]: E0131 16:46:56.237671 4769 configmap.go:193] Couldn't get configMap swift-kuttl-tests/swift-ring-config-data: configmap "swift-ring-config-data" not found Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.237837 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/2f895234-cacd-48ae-94e3-ff57a223a496-etc-swift\") pod \"swift-ring-rebalance-xskfg\" (UID: \"2f895234-cacd-48ae-94e3-ff57a223a496\") " pod="swift-kuttl-tests/swift-ring-rebalance-xskfg" Jan 31 16:46:56 crc kubenswrapper[4769]: E0131 16:46:56.238330 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2f895234-cacd-48ae-94e3-ff57a223a496-ring-data-devices podName:2f895234-cacd-48ae-94e3-ff57a223a496 nodeName:}" failed. 
No retries permitted until 2026-01-31 16:46:56.738306447 +0000 UTC m=+1064.812475116 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/2f895234-cacd-48ae-94e3-ff57a223a496-ring-data-devices") pod "swift-ring-rebalance-xskfg" (UID: "2f895234-cacd-48ae-94e3-ff57a223a496") : configmap "swift-ring-config-data" not found Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.238151 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lbbm6\" (UniqueName: \"kubernetes.io/projected/2f895234-cacd-48ae-94e3-ff57a223a496-kube-api-access-lbbm6\") pod \"swift-ring-rebalance-xskfg\" (UID: \"2f895234-cacd-48ae-94e3-ff57a223a496\") " pod="swift-kuttl-tests/swift-ring-rebalance-xskfg" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.241163 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-scripts\") pod \"swift-ring-rebalance-2sjs2\" (UID: \"54c0116b-a027-4f11-8b6b-aa00778f1acb\") " pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.239638 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-scripts\") pod \"swift-ring-rebalance-2sjs2\" (UID: \"54c0116b-a027-4f11-8b6b-aa00778f1acb\") " pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.241747 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2f895234-cacd-48ae-94e3-ff57a223a496-scripts\") pod \"swift-ring-rebalance-xskfg\" (UID: \"2f895234-cacd-48ae-94e3-ff57a223a496\") " pod="swift-kuttl-tests/swift-ring-rebalance-xskfg" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.242657 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/2f895234-cacd-48ae-94e3-ff57a223a496-swiftconf\") pod \"swift-ring-rebalance-xskfg\" (UID: \"2f895234-cacd-48ae-94e3-ff57a223a496\") " pod="swift-kuttl-tests/swift-ring-rebalance-xskfg" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.242759 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc-etc-swift\") pod \"swift-storage-0\" (UID: \"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc\") " pod="swift-kuttl-tests/swift-storage-0" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.242972 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/54c0116b-a027-4f11-8b6b-aa00778f1acb-swiftconf\") pod \"swift-ring-rebalance-2sjs2\" (UID: \"54c0116b-a027-4f11-8b6b-aa00778f1acb\") " pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.243616 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/2f895234-cacd-48ae-94e3-ff57a223a496-dispersionconf\") pod \"swift-ring-rebalance-xskfg\" (UID: \"2f895234-cacd-48ae-94e3-ff57a223a496\") " pod="swift-kuttl-tests/swift-ring-rebalance-xskfg" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.245486 4769 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/54c0116b-a027-4f11-8b6b-aa00778f1acb-dispersionconf\") pod \"swift-ring-rebalance-2sjs2\" (UID: \"54c0116b-a027-4f11-8b6b-aa00778f1acb\") " pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.245880 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2f895234-cacd-48ae-94e3-ff57a223a496-scripts\") pod \"swift-ring-rebalance-xskfg\" (UID: \"2f895234-cacd-48ae-94e3-ff57a223a496\") " pod="swift-kuttl-tests/swift-ring-rebalance-xskfg" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.246395 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc-etc-swift\") pod \"swift-storage-0\" (UID: \"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc\") " pod="swift-kuttl-tests/swift-storage-0" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.248910 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/2f895234-cacd-48ae-94e3-ff57a223a496-swiftconf\") pod \"swift-ring-rebalance-xskfg\" (UID: \"2f895234-cacd-48ae-94e3-ff57a223a496\") " pod="swift-kuttl-tests/swift-ring-rebalance-xskfg" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.267109 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lbbm6\" (UniqueName: \"kubernetes.io/projected/2f895234-cacd-48ae-94e3-ff57a223a496-kube-api-access-lbbm6\") pod \"swift-ring-rebalance-xskfg\" (UID: \"2f895234-cacd-48ae-94e3-ff57a223a496\") " pod="swift-kuttl-tests/swift-ring-rebalance-xskfg" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.267615 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mws6d\" (UniqueName: \"kubernetes.io/projected/54c0116b-a027-4f11-8b6b-aa00778f1acb-kube-api-access-mws6d\") pod \"swift-ring-rebalance-2sjs2\" (UID: \"54c0116b-a027-4f11-8b6b-aa00778f1acb\") " pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.326467 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn"] Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.327400 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.337951 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn"] Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.410830 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-xskfg" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.418143 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-xskfg" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.420226 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-storage-0" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.444980 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bvlvg\" (UniqueName: \"kubernetes.io/projected/fb764692-fbb8-4fb4-860c-2cd0e0cfd452-kube-api-access-bvlvg\") pod \"swift-proxy-7d8cf99555-gcpxn\" (UID: \"fb764692-fbb8-4fb4-860c-2cd0e0cfd452\") " pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.445153 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/fb764692-fbb8-4fb4-860c-2cd0e0cfd452-etc-swift\") pod \"swift-proxy-7d8cf99555-gcpxn\" (UID: \"fb764692-fbb8-4fb4-860c-2cd0e0cfd452\") " pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.445222 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fb764692-fbb8-4fb4-860c-2cd0e0cfd452-log-httpd\") pod \"swift-proxy-7d8cf99555-gcpxn\" (UID: \"fb764692-fbb8-4fb4-860c-2cd0e0cfd452\") " pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.445263 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb764692-fbb8-4fb4-860c-2cd0e0cfd452-config-data\") pod \"swift-proxy-7d8cf99555-gcpxn\" (UID: \"fb764692-fbb8-4fb4-860c-2cd0e0cfd452\") " pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.445309 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fb764692-fbb8-4fb4-860c-2cd0e0cfd452-run-httpd\") pod \"swift-proxy-7d8cf99555-gcpxn\" (UID: \"fb764692-fbb8-4fb4-860c-2cd0e0cfd452\") " pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.546151 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/2f895234-cacd-48ae-94e3-ff57a223a496-swiftconf\") pod \"2f895234-cacd-48ae-94e3-ff57a223a496\" (UID: \"2f895234-cacd-48ae-94e3-ff57a223a496\") " Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.546248 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/2f895234-cacd-48ae-94e3-ff57a223a496-etc-swift\") pod \"2f895234-cacd-48ae-94e3-ff57a223a496\" (UID: \"2f895234-cacd-48ae-94e3-ff57a223a496\") " Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.546280 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lbbm6\" (UniqueName: \"kubernetes.io/projected/2f895234-cacd-48ae-94e3-ff57a223a496-kube-api-access-lbbm6\") pod \"2f895234-cacd-48ae-94e3-ff57a223a496\" (UID: \"2f895234-cacd-48ae-94e3-ff57a223a496\") " Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.546335 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/2f895234-cacd-48ae-94e3-ff57a223a496-dispersionconf\") pod \"2f895234-cacd-48ae-94e3-ff57a223a496\" (UID: \"2f895234-cacd-48ae-94e3-ff57a223a496\") " Jan 31 16:46:56 crc 
kubenswrapper[4769]: I0131 16:46:56.546426 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2f895234-cacd-48ae-94e3-ff57a223a496-scripts\") pod \"2f895234-cacd-48ae-94e3-ff57a223a496\" (UID: \"2f895234-cacd-48ae-94e3-ff57a223a496\") " Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.546609 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2f895234-cacd-48ae-94e3-ff57a223a496-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "2f895234-cacd-48ae-94e3-ff57a223a496" (UID: "2f895234-cacd-48ae-94e3-ff57a223a496"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.546689 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fb764692-fbb8-4fb4-860c-2cd0e0cfd452-run-httpd\") pod \"swift-proxy-7d8cf99555-gcpxn\" (UID: \"fb764692-fbb8-4fb4-860c-2cd0e0cfd452\") " pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.546756 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bvlvg\" (UniqueName: \"kubernetes.io/projected/fb764692-fbb8-4fb4-860c-2cd0e0cfd452-kube-api-access-bvlvg\") pod \"swift-proxy-7d8cf99555-gcpxn\" (UID: \"fb764692-fbb8-4fb4-860c-2cd0e0cfd452\") " pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.546786 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/fb764692-fbb8-4fb4-860c-2cd0e0cfd452-etc-swift\") pod \"swift-proxy-7d8cf99555-gcpxn\" (UID: \"fb764692-fbb8-4fb4-860c-2cd0e0cfd452\") " pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.546841 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fb764692-fbb8-4fb4-860c-2cd0e0cfd452-log-httpd\") pod \"swift-proxy-7d8cf99555-gcpxn\" (UID: \"fb764692-fbb8-4fb4-860c-2cd0e0cfd452\") " pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.546876 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb764692-fbb8-4fb4-860c-2cd0e0cfd452-config-data\") pod \"swift-proxy-7d8cf99555-gcpxn\" (UID: \"fb764692-fbb8-4fb4-860c-2cd0e0cfd452\") " pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.546940 4769 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/2f895234-cacd-48ae-94e3-ff57a223a496-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.547096 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2f895234-cacd-48ae-94e3-ff57a223a496-scripts" (OuterVolumeSpecName: "scripts") pod "2f895234-cacd-48ae-94e3-ff57a223a496" (UID: "2f895234-cacd-48ae-94e3-ff57a223a496"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.547273 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fb764692-fbb8-4fb4-860c-2cd0e0cfd452-run-httpd\") pod \"swift-proxy-7d8cf99555-gcpxn\" (UID: \"fb764692-fbb8-4fb4-860c-2cd0e0cfd452\") " pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.547660 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fb764692-fbb8-4fb4-860c-2cd0e0cfd452-log-httpd\") pod \"swift-proxy-7d8cf99555-gcpxn\" (UID: \"fb764692-fbb8-4fb4-860c-2cd0e0cfd452\") " pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.550025 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2f895234-cacd-48ae-94e3-ff57a223a496-kube-api-access-lbbm6" (OuterVolumeSpecName: "kube-api-access-lbbm6") pod "2f895234-cacd-48ae-94e3-ff57a223a496" (UID: "2f895234-cacd-48ae-94e3-ff57a223a496"). InnerVolumeSpecName "kube-api-access-lbbm6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.550317 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2f895234-cacd-48ae-94e3-ff57a223a496-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "2f895234-cacd-48ae-94e3-ff57a223a496" (UID: "2f895234-cacd-48ae-94e3-ff57a223a496"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.550483 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2f895234-cacd-48ae-94e3-ff57a223a496-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "2f895234-cacd-48ae-94e3-ff57a223a496" (UID: "2f895234-cacd-48ae-94e3-ff57a223a496"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.551330 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb764692-fbb8-4fb4-860c-2cd0e0cfd452-config-data\") pod \"swift-proxy-7d8cf99555-gcpxn\" (UID: \"fb764692-fbb8-4fb4-860c-2cd0e0cfd452\") " pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.552800 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/fb764692-fbb8-4fb4-860c-2cd0e0cfd452-etc-swift\") pod \"swift-proxy-7d8cf99555-gcpxn\" (UID: \"fb764692-fbb8-4fb4-860c-2cd0e0cfd452\") " pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.563188 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bvlvg\" (UniqueName: \"kubernetes.io/projected/fb764692-fbb8-4fb4-860c-2cd0e0cfd452-kube-api-access-bvlvg\") pod \"swift-proxy-7d8cf99555-gcpxn\" (UID: \"fb764692-fbb8-4fb4-860c-2cd0e0cfd452\") " pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.643953 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.648604 4769 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/2f895234-cacd-48ae-94e3-ff57a223a496-dispersionconf\") on node \"crc\" DevicePath \"\"" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.648892 4769 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2f895234-cacd-48ae-94e3-ff57a223a496-scripts\") on node \"crc\" DevicePath \"\"" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.648920 4769 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/2f895234-cacd-48ae-94e3-ff57a223a496-swiftconf\") on node \"crc\" DevicePath \"\"" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.648933 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lbbm6\" (UniqueName: \"kubernetes.io/projected/2f895234-cacd-48ae-94e3-ff57a223a496-kube-api-access-lbbm6\") on node \"crc\" DevicePath \"\"" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.750751 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices\") pod \"swift-ring-rebalance-2sjs2\" (UID: \"54c0116b-a027-4f11-8b6b-aa00778f1acb\") " pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.750797 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/2f895234-cacd-48ae-94e3-ff57a223a496-ring-data-devices\") pod \"swift-ring-rebalance-xskfg\" (UID: \"2f895234-cacd-48ae-94e3-ff57a223a496\") " pod="swift-kuttl-tests/swift-ring-rebalance-xskfg" Jan 31 16:46:56 crc kubenswrapper[4769]: E0131 16:46:56.750932 4769 configmap.go:193] Couldn't get configMap swift-kuttl-tests/swift-ring-config-data: configmap "swift-ring-config-data" not found Jan 31 16:46:56 crc kubenswrapper[4769]: E0131 16:46:56.750980 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2f895234-cacd-48ae-94e3-ff57a223a496-ring-data-devices podName:2f895234-cacd-48ae-94e3-ff57a223a496 nodeName:}" failed. No retries permitted until 2026-01-31 16:46:57.750962728 +0000 UTC m=+1065.825131397 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/2f895234-cacd-48ae-94e3-ff57a223a496-ring-data-devices") pod "swift-ring-rebalance-xskfg" (UID: "2f895234-cacd-48ae-94e3-ff57a223a496") : configmap "swift-ring-config-data" not found Jan 31 16:46:56 crc kubenswrapper[4769]: E0131 16:46:56.751266 4769 configmap.go:193] Couldn't get configMap swift-kuttl-tests/swift-ring-config-data: configmap "swift-ring-config-data" not found Jan 31 16:46:56 crc kubenswrapper[4769]: E0131 16:46:56.751288 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices podName:54c0116b-a027-4f11-8b6b-aa00778f1acb nodeName:}" failed. No retries permitted until 2026-01-31 16:46:57.751280706 +0000 UTC m=+1065.825449375 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices") pod "swift-ring-rebalance-2sjs2" (UID: "54c0116b-a027-4f11-8b6b-aa00778f1acb") : configmap "swift-ring-config-data" not found Jan 31 16:46:56 crc kubenswrapper[4769]: I0131 16:46:56.862782 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-storage-0"] Jan 31 16:46:56 crc kubenswrapper[4769]: W0131 16:46:56.868996 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2d4b411e_0d38_4e04_a5d5_dfda91ec6ebc.slice/crio-a2690a6a2281cbd47c5d2d20cd8ab0432f06ed91625c342732d742c7207158c1 WatchSource:0}: Error finding container a2690a6a2281cbd47c5d2d20cd8ab0432f06ed91625c342732d742c7207158c1: Status 404 returned error can't find the container with id a2690a6a2281cbd47c5d2d20cd8ab0432f06ed91625c342732d742c7207158c1 Jan 31 16:46:57 crc kubenswrapper[4769]: I0131 16:46:57.071386 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn"] Jan 31 16:46:57 crc kubenswrapper[4769]: W0131 16:46:57.078767 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfb764692_fbb8_4fb4_860c_2cd0e0cfd452.slice/crio-dba4d7385cb855cf4cff342329c346a3b082aefe7a512589f7e1089f22f6c985 WatchSource:0}: Error finding container dba4d7385cb855cf4cff342329c346a3b082aefe7a512589f7e1089f22f6c985: Status 404 returned error can't find the container with id dba4d7385cb855cf4cff342329c346a3b082aefe7a512589f7e1089f22f6c985 Jan 31 16:46:57 crc kubenswrapper[4769]: I0131 16:46:57.421066 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerStarted","Data":"dba4d7385cb855cf4cff342329c346a3b082aefe7a512589f7e1089f22f6c985"} Jan 31 16:46:57 crc kubenswrapper[4769]: I0131 16:46:57.422407 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerStarted","Data":"a2690a6a2281cbd47c5d2d20cd8ab0432f06ed91625c342732d742c7207158c1"} Jan 31 16:46:57 crc kubenswrapper[4769]: I0131 16:46:57.422443 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-xskfg" Jan 31 16:46:57 crc kubenswrapper[4769]: I0131 16:46:57.467583 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-xskfg"] Jan 31 16:46:57 crc kubenswrapper[4769]: I0131 16:46:57.476932 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/swift-ring-rebalance-xskfg"] Jan 31 16:46:57 crc kubenswrapper[4769]: I0131 16:46:57.562858 4769 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/2f895234-cacd-48ae-94e3-ff57a223a496-ring-data-devices\") on node \"crc\" DevicePath \"\"" Jan 31 16:46:57 crc kubenswrapper[4769]: I0131 16:46:57.766297 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices\") pod \"swift-ring-rebalance-2sjs2\" (UID: \"54c0116b-a027-4f11-8b6b-aa00778f1acb\") " pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 16:46:57 crc kubenswrapper[4769]: E0131 16:46:57.766407 4769 configmap.go:193] Couldn't get configMap swift-kuttl-tests/swift-ring-config-data: configmap "swift-ring-config-data" not found Jan 31 16:46:57 crc kubenswrapper[4769]: E0131 16:46:57.766459 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices podName:54c0116b-a027-4f11-8b6b-aa00778f1acb nodeName:}" failed. No retries permitted until 2026-01-31 16:46:59.766442616 +0000 UTC m=+1067.840611285 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices") pod "swift-ring-rebalance-2sjs2" (UID: "54c0116b-a027-4f11-8b6b-aa00778f1acb") : configmap "swift-ring-config-data" not found Jan 31 16:46:58 crc kubenswrapper[4769]: I0131 16:46:58.717671 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2f895234-cacd-48ae-94e3-ff57a223a496" path="/var/lib/kubelet/pods/2f895234-cacd-48ae-94e3-ff57a223a496/volumes" Jan 31 16:46:59 crc kubenswrapper[4769]: I0131 16:46:59.794669 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices\") pod \"swift-ring-rebalance-2sjs2\" (UID: \"54c0116b-a027-4f11-8b6b-aa00778f1acb\") " pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 16:46:59 crc kubenswrapper[4769]: E0131 16:46:59.795352 4769 configmap.go:193] Couldn't get configMap swift-kuttl-tests/swift-ring-config-data: configmap "swift-ring-config-data" not found Jan 31 16:46:59 crc kubenswrapper[4769]: E0131 16:46:59.795783 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices podName:54c0116b-a027-4f11-8b6b-aa00778f1acb nodeName:}" failed. No retries permitted until 2026-01-31 16:47:03.795767361 +0000 UTC m=+1071.869936030 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices") pod "swift-ring-rebalance-2sjs2" (UID: "54c0116b-a027-4f11-8b6b-aa00778f1acb") : configmap "swift-ring-config-data" not found Jan 31 16:47:00 crc kubenswrapper[4769]: I0131 16:47:00.460979 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerStarted","Data":"fb8cd1b7a56a25fb1e03e6e444d2dbacf0780faf3d8442e84148d0d3903f3a0b"} Jan 31 16:47:00 crc kubenswrapper[4769]: I0131 16:47:00.467863 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerStarted","Data":"aaff4d572471ceb8b29f2b77a82e0f8260b9e34f4738e834c848c725b9b4b185"} Jan 31 16:47:01 crc kubenswrapper[4769]: I0131 16:47:01.506588 4769 generic.go:334] "Generic (PLEG): container finished" podID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerID="9e498ba4aa0ee505c5fe108586828708aab2e0cfcd1a5f39bab78b2449081c98" exitCode=1 Jan 31 16:47:01 crc kubenswrapper[4769]: I0131 16:47:01.506653 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerStarted","Data":"bcfb5448de1e5529813dca9b7928a45f7bd23f656a2e7b5b2d7b49d9a93b15fe"} Jan 31 16:47:01 crc kubenswrapper[4769]: I0131 16:47:01.506932 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerStarted","Data":"4ee56ae49386474b93f4a54b1463bfaa4ac6f1b6153c68ae08ea5286ac683a49"} Jan 31 16:47:01 crc kubenswrapper[4769]: I0131 16:47:01.506950 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerDied","Data":"9e498ba4aa0ee505c5fe108586828708aab2e0cfcd1a5f39bab78b2449081c98"} Jan 31 16:47:01 crc kubenswrapper[4769]: I0131 16:47:01.510056 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerStarted","Data":"0dbb0d13a88ffbb97c74a44628dce348a3397256ba96bbf3a73afd4bcad938c0"} Jan 31 16:47:01 crc kubenswrapper[4769]: I0131 16:47:01.510276 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:47:01 crc kubenswrapper[4769]: I0131 16:47:01.510432 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:47:01 crc kubenswrapper[4769]: I0131 16:47:01.546032 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podStartSLOduration=2.453046025 podStartE2EDuration="5.546011243s" podCreationTimestamp="2026-01-31 16:46:56 +0000 UTC" firstStartedPulling="2026-01-31 16:46:57.080441684 +0000 UTC m=+1065.154610343" lastFinishedPulling="2026-01-31 16:47:00.173406892 +0000 UTC m=+1068.247575561" observedRunningTime="2026-01-31 16:47:01.538112202 +0000 UTC m=+1069.612280881" watchObservedRunningTime="2026-01-31 16:47:01.546011243 +0000 UTC m=+1069.620179922" Jan 31 16:47:02 crc kubenswrapper[4769]: I0131 16:47:02.534996 4769 generic.go:334] "Generic (PLEG): container finished" 
podID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerID="0dbb0d13a88ffbb97c74a44628dce348a3397256ba96bbf3a73afd4bcad938c0" exitCode=1 Jan 31 16:47:02 crc kubenswrapper[4769]: I0131 16:47:02.535068 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerDied","Data":"0dbb0d13a88ffbb97c74a44628dce348a3397256ba96bbf3a73afd4bcad938c0"} Jan 31 16:47:02 crc kubenswrapper[4769]: I0131 16:47:02.535768 4769 scope.go:117] "RemoveContainer" containerID="0dbb0d13a88ffbb97c74a44628dce348a3397256ba96bbf3a73afd4bcad938c0" Jan 31 16:47:02 crc kubenswrapper[4769]: I0131 16:47:02.546764 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerStarted","Data":"e42277a545765b43f10b0536804f3468c53b03a1a33d27e1a80dd6f67e1f3a5b"} Jan 31 16:47:02 crc kubenswrapper[4769]: I0131 16:47:02.546804 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerStarted","Data":"31ec6e24c2538760d92c6a8cf055d99c1ee06368c14914f75736cf5f9a2ee16b"} Jan 31 16:47:02 crc kubenswrapper[4769]: I0131 16:47:02.546815 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerStarted","Data":"e4db49b800be05015d3c4a037e1e3cbf3907f6f78aff0523dd56386753181050"} Jan 31 16:47:02 crc kubenswrapper[4769]: I0131 16:47:02.645487 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:47:03 crc kubenswrapper[4769]: I0131 16:47:03.557697 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerStarted","Data":"84ab2349590553bda5b666892ec122ee0a348b422d189b8805c3b0af2fd68d8a"} Jan 31 16:47:03 crc kubenswrapper[4769]: I0131 16:47:03.558556 4769 scope.go:117] "RemoveContainer" containerID="84ab2349590553bda5b666892ec122ee0a348b422d189b8805c3b0af2fd68d8a" Jan 31 16:47:03 crc kubenswrapper[4769]: E0131 16:47:03.558968 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 10s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:47:03 crc kubenswrapper[4769]: I0131 16:47:03.564359 4769 generic.go:334] "Generic (PLEG): container finished" podID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerID="31ec6e24c2538760d92c6a8cf055d99c1ee06368c14914f75736cf5f9a2ee16b" exitCode=1 Jan 31 16:47:03 crc kubenswrapper[4769]: I0131 16:47:03.564411 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerDied","Data":"31ec6e24c2538760d92c6a8cf055d99c1ee06368c14914f75736cf5f9a2ee16b"} Jan 31 16:47:03 crc kubenswrapper[4769]: I0131 16:47:03.564441 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" 
event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerStarted","Data":"09b7562c3d466a41880f5909b6af118ad218111a5b099f6f918545032b3764fa"} Jan 31 16:47:03 crc kubenswrapper[4769]: I0131 16:47:03.828110 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices\") pod \"swift-ring-rebalance-2sjs2\" (UID: \"54c0116b-a027-4f11-8b6b-aa00778f1acb\") " pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 16:47:03 crc kubenswrapper[4769]: E0131 16:47:03.829279 4769 configmap.go:193] Couldn't get configMap swift-kuttl-tests/swift-ring-config-data: configmap "swift-ring-config-data" not found Jan 31 16:47:03 crc kubenswrapper[4769]: E0131 16:47:03.829332 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices podName:54c0116b-a027-4f11-8b6b-aa00778f1acb nodeName:}" failed. No retries permitted until 2026-01-31 16:47:11.82931574 +0000 UTC m=+1079.903484409 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices") pod "swift-ring-rebalance-2sjs2" (UID: "54c0116b-a027-4f11-8b6b-aa00778f1acb") : configmap "swift-ring-config-data" not found Jan 31 16:47:04 crc kubenswrapper[4769]: I0131 16:47:04.579048 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerStarted","Data":"46b7e6f3d2ee35bb4d2c43646e7b5a993ec4c3547e917bdc2031fe9df98a4af2"} Jan 31 16:47:04 crc kubenswrapper[4769]: I0131 16:47:04.579092 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerStarted","Data":"9da987766f9bcc0221be6b69a7b2dddfeb05d641cc869c24584c2a7ffe73b5e6"} Jan 31 16:47:04 crc kubenswrapper[4769]: I0131 16:47:04.579106 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerStarted","Data":"30a79b373b594fc90eeefd92adc4112a9a9bf07fda7fbc1c0b65d00fc6b49abf"} Jan 31 16:47:04 crc kubenswrapper[4769]: I0131 16:47:04.579117 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerStarted","Data":"8b402ba867fc3e1b8aeec82352ec3d1add5d3e9c83be0f8ac71af1060a36bf0b"} Jan 31 16:47:04 crc kubenswrapper[4769]: I0131 16:47:04.579129 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerStarted","Data":"93c8011f29f9ae3f1996f20440683ea8bb6cc88c5be0ef6bbbaacd6928db68c5"} Jan 31 16:47:04 crc kubenswrapper[4769]: I0131 16:47:04.579141 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerStarted","Data":"4e075c22c49a5afeea376a661a368be4d293c9991ddde3126f346b830af8b432"} Jan 31 16:47:04 crc kubenswrapper[4769]: I0131 16:47:04.582183 4769 generic.go:334] "Generic (PLEG): container finished" podID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerID="84ab2349590553bda5b666892ec122ee0a348b422d189b8805c3b0af2fd68d8a" exitCode=1 Jan 31 16:47:04 crc 
kubenswrapper[4769]: I0131 16:47:04.582241 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerDied","Data":"84ab2349590553bda5b666892ec122ee0a348b422d189b8805c3b0af2fd68d8a"} Jan 31 16:47:04 crc kubenswrapper[4769]: I0131 16:47:04.582274 4769 scope.go:117] "RemoveContainer" containerID="0dbb0d13a88ffbb97c74a44628dce348a3397256ba96bbf3a73afd4bcad938c0" Jan 31 16:47:04 crc kubenswrapper[4769]: I0131 16:47:04.582955 4769 scope.go:117] "RemoveContainer" containerID="84ab2349590553bda5b666892ec122ee0a348b422d189b8805c3b0af2fd68d8a" Jan 31 16:47:04 crc kubenswrapper[4769]: E0131 16:47:04.584472 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 10s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:47:05 crc kubenswrapper[4769]: I0131 16:47:05.600136 4769 scope.go:117] "RemoveContainer" containerID="84ab2349590553bda5b666892ec122ee0a348b422d189b8805c3b0af2fd68d8a" Jan 31 16:47:05 crc kubenswrapper[4769]: E0131 16:47:05.602547 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 10s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:47:05 crc kubenswrapper[4769]: I0131 16:47:05.608893 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:47:05 crc kubenswrapper[4769]: I0131 16:47:05.610259 4769 generic.go:334] "Generic (PLEG): container finished" podID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerID="9da987766f9bcc0221be6b69a7b2dddfeb05d641cc869c24584c2a7ffe73b5e6" exitCode=1 Jan 31 16:47:05 crc kubenswrapper[4769]: I0131 16:47:05.610303 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerDied","Data":"9da987766f9bcc0221be6b69a7b2dddfeb05d641cc869c24584c2a7ffe73b5e6"} Jan 31 16:47:05 crc kubenswrapper[4769]: I0131 16:47:05.610335 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerStarted","Data":"ae4b76720e39c2c27b201ebc5d00611354fa2b33e6e7091e3d3a7bfb26b1fbb0"} Jan 31 16:47:05 crc kubenswrapper[4769]: I0131 16:47:05.610908 4769 scope.go:117] "RemoveContainer" containerID="9e498ba4aa0ee505c5fe108586828708aab2e0cfcd1a5f39bab78b2449081c98" Jan 31 16:47:05 crc kubenswrapper[4769]: I0131 16:47:05.610981 4769 scope.go:117] "RemoveContainer" containerID="31ec6e24c2538760d92c6a8cf055d99c1ee06368c14914f75736cf5f9a2ee16b" Jan 31 16:47:05 crc kubenswrapper[4769]: I0131 16:47:05.611073 4769 scope.go:117] "RemoveContainer" containerID="9da987766f9bcc0221be6b69a7b2dddfeb05d641cc869c24584c2a7ffe73b5e6" Jan 31 16:47:05 crc kubenswrapper[4769]: I0131 16:47:05.645306 4769 kubelet.go:2542] "SyncLoop 
(probe)" probe="liveness" status="unhealthy" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:47:05 crc kubenswrapper[4769]: I0131 16:47:05.646843 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:47:06 crc kubenswrapper[4769]: I0131 16:47:06.622933 4769 generic.go:334] "Generic (PLEG): container finished" podID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerID="01193808e3cbf5a9bcefe1bee452126298f36c8454b27c3d31ef568eab7d6a47" exitCode=1 Jan 31 16:47:06 crc kubenswrapper[4769]: I0131 16:47:06.623239 4769 generic.go:334] "Generic (PLEG): container finished" podID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerID="abfad90412137dc6b7517b04cb413659603365810ca4cba4dc4af42745c5cecd" exitCode=1 Jan 31 16:47:06 crc kubenswrapper[4769]: I0131 16:47:06.623248 4769 generic.go:334] "Generic (PLEG): container finished" podID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerID="f3e7a5764f5106a883da87618697e2ad33148d3c1da28f2b4116b24f53473b62" exitCode=1 Jan 31 16:47:06 crc kubenswrapper[4769]: I0131 16:47:06.623073 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerDied","Data":"01193808e3cbf5a9bcefe1bee452126298f36c8454b27c3d31ef568eab7d6a47"} Jan 31 16:47:06 crc kubenswrapper[4769]: I0131 16:47:06.623350 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerDied","Data":"abfad90412137dc6b7517b04cb413659603365810ca4cba4dc4af42745c5cecd"} Jan 31 16:47:06 crc kubenswrapper[4769]: I0131 16:47:06.623364 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerDied","Data":"f3e7a5764f5106a883da87618697e2ad33148d3c1da28f2b4116b24f53473b62"} Jan 31 16:47:06 crc kubenswrapper[4769]: I0131 16:47:06.623379 4769 scope.go:117] "RemoveContainer" containerID="9da987766f9bcc0221be6b69a7b2dddfeb05d641cc869c24584c2a7ffe73b5e6" Jan 31 16:47:06 crc kubenswrapper[4769]: I0131 16:47:06.624030 4769 scope.go:117] "RemoveContainer" containerID="f3e7a5764f5106a883da87618697e2ad33148d3c1da28f2b4116b24f53473b62" Jan 31 16:47:06 crc kubenswrapper[4769]: I0131 16:47:06.624477 4769 scope.go:117] "RemoveContainer" containerID="84ab2349590553bda5b666892ec122ee0a348b422d189b8805c3b0af2fd68d8a" Jan 31 16:47:06 crc kubenswrapper[4769]: E0131 16:47:06.624799 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 10s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:47:06 crc kubenswrapper[4769]: I0131 16:47:06.625207 4769 scope.go:117] "RemoveContainer" containerID="abfad90412137dc6b7517b04cb413659603365810ca4cba4dc4af42745c5cecd" Jan 31 16:47:06 crc kubenswrapper[4769]: I0131 16:47:06.625364 4769 scope.go:117] "RemoveContainer" containerID="01193808e3cbf5a9bcefe1bee452126298f36c8454b27c3d31ef568eab7d6a47" Jan 31 16:47:06 crc kubenswrapper[4769]: E0131 16:47:06.625717 4769 pod_workers.go:1301] "Error syncing pod, 
skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 10s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:47:06 crc kubenswrapper[4769]: I0131 16:47:06.626509 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:47:06 crc kubenswrapper[4769]: I0131 16:47:06.644630 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:47:06 crc kubenswrapper[4769]: I0131 16:47:06.647171 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:47:06 crc kubenswrapper[4769]: I0131 16:47:06.681160 4769 scope.go:117] "RemoveContainer" containerID="31ec6e24c2538760d92c6a8cf055d99c1ee06368c14914f75736cf5f9a2ee16b" Jan 31 16:47:06 crc kubenswrapper[4769]: I0131 16:47:06.725485 4769 scope.go:117] "RemoveContainer" containerID="9e498ba4aa0ee505c5fe108586828708aab2e0cfcd1a5f39bab78b2449081c98" Jan 31 16:47:07 crc kubenswrapper[4769]: I0131 16:47:07.642303 4769 scope.go:117] "RemoveContainer" containerID="84ab2349590553bda5b666892ec122ee0a348b422d189b8805c3b0af2fd68d8a" Jan 31 16:47:07 crc kubenswrapper[4769]: I0131 16:47:07.642732 4769 scope.go:117] "RemoveContainer" containerID="f3e7a5764f5106a883da87618697e2ad33148d3c1da28f2b4116b24f53473b62" Jan 31 16:47:07 crc kubenswrapper[4769]: E0131 16:47:07.642766 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 10s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:47:07 crc kubenswrapper[4769]: I0131 16:47:07.642853 4769 scope.go:117] "RemoveContainer" containerID="abfad90412137dc6b7517b04cb413659603365810ca4cba4dc4af42745c5cecd" Jan 31 16:47:07 crc kubenswrapper[4769]: I0131 16:47:07.643034 4769 scope.go:117] "RemoveContainer" containerID="01193808e3cbf5a9bcefe1bee452126298f36c8454b27c3d31ef568eab7d6a47" Jan 31 16:47:07 crc kubenswrapper[4769]: E0131 16:47:07.643528 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 10s restarting failed 
container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 10s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:47:07 crc kubenswrapper[4769]: I0131 16:47:07.643999 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:47:08 crc kubenswrapper[4769]: I0131 16:47:08.646484 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:47:11 crc kubenswrapper[4769]: I0131 16:47:11.647909 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:47:11 crc kubenswrapper[4769]: I0131 16:47:11.648852 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:47:11 crc kubenswrapper[4769]: I0131 16:47:11.648917 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:47:11 crc kubenswrapper[4769]: I0131 16:47:11.650825 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="proxy-httpd" containerStatusID={"Type":"cri-o","ID":"aaff4d572471ceb8b29f2b77a82e0f8260b9e34f4738e834c848c725b9b4b185"} pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" containerMessage="Container proxy-httpd failed liveness probe, will be restarted" Jan 31 16:47:11 crc kubenswrapper[4769]: I0131 16:47:11.650879 4769 scope.go:117] "RemoveContainer" containerID="84ab2349590553bda5b666892ec122ee0a348b422d189b8805c3b0af2fd68d8a" Jan 31 16:47:11 crc kubenswrapper[4769]: I0131 16:47:11.650942 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" containerID="cri-o://aaff4d572471ceb8b29f2b77a82e0f8260b9e34f4738e834c848c725b9b4b185" gracePeriod=30 Jan 31 16:47:11 crc kubenswrapper[4769]: I0131 16:47:11.652114 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:47:11 crc kubenswrapper[4769]: I0131 16:47:11.875659 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices\") pod \"swift-ring-rebalance-2sjs2\" (UID: \"54c0116b-a027-4f11-8b6b-aa00778f1acb\") " pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 16:47:11 crc 
kubenswrapper[4769]: E0131 16:47:11.875797 4769 configmap.go:193] Couldn't get configMap swift-kuttl-tests/swift-ring-config-data: configmap "swift-ring-config-data" not found Jan 31 16:47:11 crc kubenswrapper[4769]: E0131 16:47:11.875853 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices podName:54c0116b-a027-4f11-8b6b-aa00778f1acb nodeName:}" failed. No retries permitted until 2026-01-31 16:47:27.875836 +0000 UTC m=+1095.950004679 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices") pod "swift-ring-rebalance-2sjs2" (UID: "54c0116b-a027-4f11-8b6b-aa00778f1acb") : configmap "swift-ring-config-data" not found Jan 31 16:47:11 crc kubenswrapper[4769]: E0131 16:47:11.986317 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 10s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:47:12 crc kubenswrapper[4769]: I0131 16:47:12.691486 4769 generic.go:334] "Generic (PLEG): container finished" podID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerID="aaff4d572471ceb8b29f2b77a82e0f8260b9e34f4738e834c848c725b9b4b185" exitCode=0 Jan 31 16:47:12 crc kubenswrapper[4769]: I0131 16:47:12.691537 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerDied","Data":"aaff4d572471ceb8b29f2b77a82e0f8260b9e34f4738e834c848c725b9b4b185"} Jan 31 16:47:12 crc kubenswrapper[4769]: I0131 16:47:12.691599 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerStarted","Data":"013e1597ff01e4f3e852e73fc31a65007b7784a5a2b2fb8e57e810edda59fea3"} Jan 31 16:47:12 crc kubenswrapper[4769]: I0131 16:47:12.691806 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:47:12 crc kubenswrapper[4769]: I0131 16:47:12.692240 4769 scope.go:117] "RemoveContainer" containerID="84ab2349590553bda5b666892ec122ee0a348b422d189b8805c3b0af2fd68d8a" Jan 31 16:47:12 crc kubenswrapper[4769]: E0131 16:47:12.692433 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 10s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:47:13 crc kubenswrapper[4769]: I0131 16:47:13.703821 4769 scope.go:117] "RemoveContainer" containerID="84ab2349590553bda5b666892ec122ee0a348b422d189b8805c3b0af2fd68d8a" Jan 31 16:47:14 crc kubenswrapper[4769]: I0131 16:47:14.717545 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerStarted","Data":"0c6f2f2825d80f347f54662215f95dc6a74e62677d49beffffa20f09de1e7cfc"} Jan 31 16:47:14 crc kubenswrapper[4769]: I0131 
16:47:14.718340 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:47:15 crc kubenswrapper[4769]: I0131 16:47:15.725521 4769 generic.go:334] "Generic (PLEG): container finished" podID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerID="0c6f2f2825d80f347f54662215f95dc6a74e62677d49beffffa20f09de1e7cfc" exitCode=1 Jan 31 16:47:15 crc kubenswrapper[4769]: I0131 16:47:15.725593 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerDied","Data":"0c6f2f2825d80f347f54662215f95dc6a74e62677d49beffffa20f09de1e7cfc"} Jan 31 16:47:15 crc kubenswrapper[4769]: I0131 16:47:15.725831 4769 scope.go:117] "RemoveContainer" containerID="84ab2349590553bda5b666892ec122ee0a348b422d189b8805c3b0af2fd68d8a" Jan 31 16:47:15 crc kubenswrapper[4769]: I0131 16:47:15.727111 4769 scope.go:117] "RemoveContainer" containerID="0c6f2f2825d80f347f54662215f95dc6a74e62677d49beffffa20f09de1e7cfc" Jan 31 16:47:15 crc kubenswrapper[4769]: E0131 16:47:15.728026 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 20s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:47:16 crc kubenswrapper[4769]: I0131 16:47:16.647852 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:47:16 crc kubenswrapper[4769]: I0131 16:47:16.741685 4769 scope.go:117] "RemoveContainer" containerID="0c6f2f2825d80f347f54662215f95dc6a74e62677d49beffffa20f09de1e7cfc" Jan 31 16:47:16 crc kubenswrapper[4769]: E0131 16:47:16.741886 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 20s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:47:16 crc kubenswrapper[4769]: I0131 16:47:16.743465 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:47:17 crc kubenswrapper[4769]: I0131 16:47:17.645410 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:47:17 crc kubenswrapper[4769]: I0131 16:47:17.647536 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:47:17 crc kubenswrapper[4769]: I0131 16:47:17.749265 4769 scope.go:117] "RemoveContainer" containerID="0c6f2f2825d80f347f54662215f95dc6a74e62677d49beffffa20f09de1e7cfc" Jan 31 16:47:17 crc kubenswrapper[4769]: E0131 16:47:17.749682 4769 pod_workers.go:1301] 
"Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 20s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:47:17 crc kubenswrapper[4769]: I0131 16:47:17.750737 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:47:20 crc kubenswrapper[4769]: I0131 16:47:20.646516 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:47:21 crc kubenswrapper[4769]: I0131 16:47:21.647412 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:47:21 crc kubenswrapper[4769]: I0131 16:47:21.709047 4769 scope.go:117] "RemoveContainer" containerID="f3e7a5764f5106a883da87618697e2ad33148d3c1da28f2b4116b24f53473b62" Jan 31 16:47:21 crc kubenswrapper[4769]: I0131 16:47:21.709183 4769 scope.go:117] "RemoveContainer" containerID="abfad90412137dc6b7517b04cb413659603365810ca4cba4dc4af42745c5cecd" Jan 31 16:47:21 crc kubenswrapper[4769]: I0131 16:47:21.709358 4769 scope.go:117] "RemoveContainer" containerID="01193808e3cbf5a9bcefe1bee452126298f36c8454b27c3d31ef568eab7d6a47" Jan 31 16:47:22 crc kubenswrapper[4769]: I0131 16:47:22.802666 4769 generic.go:334] "Generic (PLEG): container finished" podID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerID="1df0281136f40468d1df1d849b5642d1ee22146b1e11af6810180df50eac97ac" exitCode=1 Jan 31 16:47:22 crc kubenswrapper[4769]: I0131 16:47:22.802966 4769 generic.go:334] "Generic (PLEG): container finished" podID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerID="1969d639137670309947905db392a6df8db778fc5ddcc99a6312f3b13fba09bd" exitCode=1 Jan 31 16:47:22 crc kubenswrapper[4769]: I0131 16:47:22.802986 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerStarted","Data":"7e9d034ea0da4a4318563a74272a4097583bb75421c6d2c785a08de4097471b6"} Jan 31 16:47:22 crc kubenswrapper[4769]: I0131 16:47:22.803011 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerDied","Data":"1df0281136f40468d1df1d849b5642d1ee22146b1e11af6810180df50eac97ac"} Jan 31 16:47:22 crc kubenswrapper[4769]: I0131 16:47:22.803024 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerDied","Data":"1969d639137670309947905db392a6df8db778fc5ddcc99a6312f3b13fba09bd"} Jan 31 16:47:22 crc kubenswrapper[4769]: I0131 16:47:22.803039 4769 scope.go:117] "RemoveContainer" containerID="abfad90412137dc6b7517b04cb413659603365810ca4cba4dc4af42745c5cecd" Jan 31 16:47:22 crc kubenswrapper[4769]: I0131 16:47:22.803630 4769 scope.go:117] 
"RemoveContainer" containerID="1969d639137670309947905db392a6df8db778fc5ddcc99a6312f3b13fba09bd" Jan 31 16:47:22 crc kubenswrapper[4769]: I0131 16:47:22.803693 4769 scope.go:117] "RemoveContainer" containerID="1df0281136f40468d1df1d849b5642d1ee22146b1e11af6810180df50eac97ac" Jan 31 16:47:22 crc kubenswrapper[4769]: E0131 16:47:22.804102 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:47:22 crc kubenswrapper[4769]: I0131 16:47:22.874779 4769 scope.go:117] "RemoveContainer" containerID="f3e7a5764f5106a883da87618697e2ad33148d3c1da28f2b4116b24f53473b62" Jan 31 16:47:23 crc kubenswrapper[4769]: I0131 16:47:23.648281 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:47:23 crc kubenswrapper[4769]: I0131 16:47:23.649015 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:47:23 crc kubenswrapper[4769]: I0131 16:47:23.650714 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="proxy-httpd" containerStatusID={"Type":"cri-o","ID":"013e1597ff01e4f3e852e73fc31a65007b7784a5a2b2fb8e57e810edda59fea3"} pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" containerMessage="Container proxy-httpd failed liveness probe, will be restarted" Jan 31 16:47:23 crc kubenswrapper[4769]: I0131 16:47:23.650921 4769 scope.go:117] "RemoveContainer" containerID="0c6f2f2825d80f347f54662215f95dc6a74e62677d49beffffa20f09de1e7cfc" Jan 31 16:47:23 crc kubenswrapper[4769]: I0131 16:47:23.651119 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" containerID="cri-o://013e1597ff01e4f3e852e73fc31a65007b7784a5a2b2fb8e57e810edda59fea3" gracePeriod=30 Jan 31 16:47:23 crc kubenswrapper[4769]: I0131 16:47:23.651107 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:47:23 crc kubenswrapper[4769]: I0131 16:47:23.817796 4769 generic.go:334] "Generic (PLEG): container finished" podID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerID="013e1597ff01e4f3e852e73fc31a65007b7784a5a2b2fb8e57e810edda59fea3" exitCode=0 Jan 31 16:47:23 crc kubenswrapper[4769]: I0131 16:47:23.817876 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerDied","Data":"013e1597ff01e4f3e852e73fc31a65007b7784a5a2b2fb8e57e810edda59fea3"} Jan 31 16:47:23 crc kubenswrapper[4769]: I0131 16:47:23.817927 4769 scope.go:117] 
"RemoveContainer" containerID="aaff4d572471ceb8b29f2b77a82e0f8260b9e34f4738e834c848c725b9b4b185" Jan 31 16:47:23 crc kubenswrapper[4769]: I0131 16:47:23.834712 4769 generic.go:334] "Generic (PLEG): container finished" podID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerID="7e9d034ea0da4a4318563a74272a4097583bb75421c6d2c785a08de4097471b6" exitCode=1 Jan 31 16:47:23 crc kubenswrapper[4769]: I0131 16:47:23.834777 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerDied","Data":"7e9d034ea0da4a4318563a74272a4097583bb75421c6d2c785a08de4097471b6"} Jan 31 16:47:23 crc kubenswrapper[4769]: I0131 16:47:23.835654 4769 scope.go:117] "RemoveContainer" containerID="1969d639137670309947905db392a6df8db778fc5ddcc99a6312f3b13fba09bd" Jan 31 16:47:23 crc kubenswrapper[4769]: I0131 16:47:23.835752 4769 scope.go:117] "RemoveContainer" containerID="1df0281136f40468d1df1d849b5642d1ee22146b1e11af6810180df50eac97ac" Jan 31 16:47:23 crc kubenswrapper[4769]: I0131 16:47:23.835895 4769 scope.go:117] "RemoveContainer" containerID="7e9d034ea0da4a4318563a74272a4097583bb75421c6d2c785a08de4097471b6" Jan 31 16:47:23 crc kubenswrapper[4769]: E0131 16:47:23.836317 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 20s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:47:23 crc kubenswrapper[4769]: I0131 16:47:23.884710 4769 scope.go:117] "RemoveContainer" containerID="01193808e3cbf5a9bcefe1bee452126298f36c8454b27c3d31ef568eab7d6a47" Jan 31 16:47:24 crc kubenswrapper[4769]: E0131 16:47:24.000220 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 20s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:47:24 crc kubenswrapper[4769]: I0131 16:47:24.844959 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerStarted","Data":"7489ca7108cf27c2b9dbdf9cfee37c1cb961142f50ed12892786dbef874a95d6"} Jan 31 16:47:24 crc kubenswrapper[4769]: I0131 16:47:24.845573 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:47:24 crc kubenswrapper[4769]: I0131 16:47:24.845974 4769 scope.go:117] "RemoveContainer" containerID="0c6f2f2825d80f347f54662215f95dc6a74e62677d49beffffa20f09de1e7cfc" Jan 31 16:47:24 crc kubenswrapper[4769]: E0131 16:47:24.846618 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with 
CrashLoopBackOff: \"back-off 20s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:47:25 crc kubenswrapper[4769]: I0131 16:47:25.863976 4769 scope.go:117] "RemoveContainer" containerID="0c6f2f2825d80f347f54662215f95dc6a74e62677d49beffffa20f09de1e7cfc" Jan 31 16:47:25 crc kubenswrapper[4769]: E0131 16:47:25.864315 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 20s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:47:27 crc kubenswrapper[4769]: I0131 16:47:27.954729 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices\") pod \"swift-ring-rebalance-2sjs2\" (UID: \"54c0116b-a027-4f11-8b6b-aa00778f1acb\") " pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 16:47:27 crc kubenswrapper[4769]: E0131 16:47:27.954897 4769 configmap.go:193] Couldn't get configMap swift-kuttl-tests/swift-ring-config-data: configmap "swift-ring-config-data" not found Jan 31 16:47:27 crc kubenswrapper[4769]: E0131 16:47:27.955244 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices podName:54c0116b-a027-4f11-8b6b-aa00778f1acb nodeName:}" failed. No retries permitted until 2026-01-31 16:47:59.955226645 +0000 UTC m=+1128.029395314 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices") pod "swift-ring-rebalance-2sjs2" (UID: "54c0116b-a027-4f11-8b6b-aa00778f1acb") : configmap "swift-ring-config-data" not found Jan 31 16:47:29 crc kubenswrapper[4769]: I0131 16:47:29.648555 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:47:31 crc kubenswrapper[4769]: I0131 16:47:31.646903 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:47:32 crc kubenswrapper[4769]: I0131 16:47:32.646461 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:47:35 crc kubenswrapper[4769]: I0131 16:47:35.647774 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:47:35 crc kubenswrapper[4769]: I0131 16:47:35.648268 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:47:35 crc kubenswrapper[4769]: I0131 16:47:35.649097 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="proxy-httpd" containerStatusID={"Type":"cri-o","ID":"7489ca7108cf27c2b9dbdf9cfee37c1cb961142f50ed12892786dbef874a95d6"} pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" containerMessage="Container proxy-httpd failed liveness probe, will be restarted" Jan 31 16:47:35 crc kubenswrapper[4769]: I0131 16:47:35.649132 4769 scope.go:117] "RemoveContainer" containerID="0c6f2f2825d80f347f54662215f95dc6a74e62677d49beffffa20f09de1e7cfc" Jan 31 16:47:35 crc kubenswrapper[4769]: I0131 16:47:35.649169 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" containerID="cri-o://7489ca7108cf27c2b9dbdf9cfee37c1cb961142f50ed12892786dbef874a95d6" gracePeriod=30 Jan 31 16:47:35 crc kubenswrapper[4769]: I0131 16:47:35.651709 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:47:35 crc kubenswrapper[4769]: I0131 16:47:35.958907 4769 generic.go:334] "Generic (PLEG): container finished" podID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerID="7489ca7108cf27c2b9dbdf9cfee37c1cb961142f50ed12892786dbef874a95d6" exitCode=0 Jan 31 16:47:35 crc kubenswrapper[4769]: I0131 16:47:35.958958 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" 
event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerDied","Data":"7489ca7108cf27c2b9dbdf9cfee37c1cb961142f50ed12892786dbef874a95d6"} Jan 31 16:47:35 crc kubenswrapper[4769]: I0131 16:47:35.959046 4769 scope.go:117] "RemoveContainer" containerID="013e1597ff01e4f3e852e73fc31a65007b7784a5a2b2fb8e57e810edda59fea3" Jan 31 16:47:36 crc kubenswrapper[4769]: I0131 16:47:36.646856 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:47:36 crc kubenswrapper[4769]: I0131 16:47:36.970893 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerStarted","Data":"411b0835913fe29702f8101824c5797842252de527848b5744db90f91b93fe5d"} Jan 31 16:47:36 crc kubenswrapper[4769]: I0131 16:47:36.970928 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerStarted","Data":"82b8e532ba4d701b6d414ae29d17a25768fedfd058eb3b0abd62e89c6bd1ceb7"} Jan 31 16:47:36 crc kubenswrapper[4769]: I0131 16:47:36.971147 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:47:36 crc kubenswrapper[4769]: I0131 16:47:36.972248 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:47:37 crc kubenswrapper[4769]: I0131 16:47:37.998790 4769 generic.go:334] "Generic (PLEG): container finished" podID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerID="411b0835913fe29702f8101824c5797842252de527848b5744db90f91b93fe5d" exitCode=1 Jan 31 16:47:37 crc kubenswrapper[4769]: I0131 16:47:37.998847 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerDied","Data":"411b0835913fe29702f8101824c5797842252de527848b5744db90f91b93fe5d"} Jan 31 16:47:37 crc kubenswrapper[4769]: I0131 16:47:37.999135 4769 scope.go:117] "RemoveContainer" containerID="0c6f2f2825d80f347f54662215f95dc6a74e62677d49beffffa20f09de1e7cfc" Jan 31 16:47:37 crc kubenswrapper[4769]: I0131 16:47:37.999342 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:47:37 crc kubenswrapper[4769]: I0131 16:47:37.999934 4769 scope.go:117] "RemoveContainer" containerID="411b0835913fe29702f8101824c5797842252de527848b5744db90f91b93fe5d" Jan 31 16:47:38 crc kubenswrapper[4769]: E0131 16:47:38.000262 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 40s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:47:38 crc kubenswrapper[4769]: I0131 16:47:38.645087 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 
31 16:47:38 crc kubenswrapper[4769]: I0131 16:47:38.707911 4769 scope.go:117] "RemoveContainer" containerID="1969d639137670309947905db392a6df8db778fc5ddcc99a6312f3b13fba09bd" Jan 31 16:47:38 crc kubenswrapper[4769]: I0131 16:47:38.707972 4769 scope.go:117] "RemoveContainer" containerID="1df0281136f40468d1df1d849b5642d1ee22146b1e11af6810180df50eac97ac" Jan 31 16:47:38 crc kubenswrapper[4769]: I0131 16:47:38.708061 4769 scope.go:117] "RemoveContainer" containerID="7e9d034ea0da4a4318563a74272a4097583bb75421c6d2c785a08de4097471b6" Jan 31 16:47:38 crc kubenswrapper[4769]: E0131 16:47:38.708378 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 20s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:47:39 crc kubenswrapper[4769]: I0131 16:47:39.012404 4769 scope.go:117] "RemoveContainer" containerID="411b0835913fe29702f8101824c5797842252de527848b5744db90f91b93fe5d" Jan 31 16:47:39 crc kubenswrapper[4769]: E0131 16:47:39.012778 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 40s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:47:40 crc kubenswrapper[4769]: I0131 16:47:40.020246 4769 scope.go:117] "RemoveContainer" containerID="411b0835913fe29702f8101824c5797842252de527848b5744db90f91b93fe5d" Jan 31 16:47:40 crc kubenswrapper[4769]: E0131 16:47:40.020986 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 40s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:47:41 crc kubenswrapper[4769]: I0131 16:47:41.647691 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:47:41 crc kubenswrapper[4769]: I0131 16:47:41.648010 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:47:44 crc kubenswrapper[4769]: I0131 16:47:44.646757 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" 
output="HTTP probe failed with statuscode: 503" Jan 31 16:47:46 crc kubenswrapper[4769]: I0131 16:47:46.647077 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:47:47 crc kubenswrapper[4769]: I0131 16:47:47.647073 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:47:47 crc kubenswrapper[4769]: I0131 16:47:47.648106 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:47:47 crc kubenswrapper[4769]: I0131 16:47:47.649390 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="proxy-httpd" containerStatusID={"Type":"cri-o","ID":"82b8e532ba4d701b6d414ae29d17a25768fedfd058eb3b0abd62e89c6bd1ceb7"} pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" containerMessage="Container proxy-httpd failed liveness probe, will be restarted" Jan 31 16:47:47 crc kubenswrapper[4769]: I0131 16:47:47.649443 4769 scope.go:117] "RemoveContainer" containerID="411b0835913fe29702f8101824c5797842252de527848b5744db90f91b93fe5d" Jan 31 16:47:47 crc kubenswrapper[4769]: I0131 16:47:47.649529 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" containerID="cri-o://82b8e532ba4d701b6d414ae29d17a25768fedfd058eb3b0abd62e89c6bd1ceb7" gracePeriod=30 Jan 31 16:47:47 crc kubenswrapper[4769]: I0131 16:47:47.651366 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:47:47 crc kubenswrapper[4769]: E0131 16:47:47.790480 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 20s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 40s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:47:48 crc kubenswrapper[4769]: I0131 16:47:48.107475 4769 generic.go:334] "Generic (PLEG): container finished" podID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerID="82b8e532ba4d701b6d414ae29d17a25768fedfd058eb3b0abd62e89c6bd1ceb7" exitCode=0 Jan 31 16:47:48 crc kubenswrapper[4769]: I0131 16:47:48.107666 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerDied","Data":"82b8e532ba4d701b6d414ae29d17a25768fedfd058eb3b0abd62e89c6bd1ceb7"} Jan 31 16:47:48 crc kubenswrapper[4769]: I0131 16:47:48.108082 4769 scope.go:117] "RemoveContainer" containerID="7489ca7108cf27c2b9dbdf9cfee37c1cb961142f50ed12892786dbef874a95d6" 
Jan 31 16:47:48 crc kubenswrapper[4769]: I0131 16:47:48.109661 4769 scope.go:117] "RemoveContainer" containerID="82b8e532ba4d701b6d414ae29d17a25768fedfd058eb3b0abd62e89c6bd1ceb7" Jan 31 16:47:48 crc kubenswrapper[4769]: I0131 16:47:48.109727 4769 scope.go:117] "RemoveContainer" containerID="411b0835913fe29702f8101824c5797842252de527848b5744db90f91b93fe5d" Jan 31 16:47:48 crc kubenswrapper[4769]: E0131 16:47:48.112781 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 20s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 40s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:47:50 crc kubenswrapper[4769]: I0131 16:47:50.682611 4769 patch_prober.go:28] interesting pod/machine-config-daemon-4bqbm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 31 16:47:50 crc kubenswrapper[4769]: I0131 16:47:50.683103 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 31 16:47:53 crc kubenswrapper[4769]: I0131 16:47:53.709867 4769 scope.go:117] "RemoveContainer" containerID="1969d639137670309947905db392a6df8db778fc5ddcc99a6312f3b13fba09bd" Jan 31 16:47:53 crc kubenswrapper[4769]: I0131 16:47:53.710005 4769 scope.go:117] "RemoveContainer" containerID="1df0281136f40468d1df1d849b5642d1ee22146b1e11af6810180df50eac97ac" Jan 31 16:47:53 crc kubenswrapper[4769]: I0131 16:47:53.710183 4769 scope.go:117] "RemoveContainer" containerID="7e9d034ea0da4a4318563a74272a4097583bb75421c6d2c785a08de4097471b6" Jan 31 16:47:54 crc kubenswrapper[4769]: I0131 16:47:54.174194 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerStarted","Data":"ff3557930426e147241892853f4c090c641291f45893ae5f83ac3ce4fee42006"} Jan 31 16:47:54 crc kubenswrapper[4769]: I0131 16:47:54.174709 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerStarted","Data":"d3fe766168d1ddf08c138941ef0f8ded6e6bdd5a57624307702be2b9514d8160"} Jan 31 16:47:55 crc kubenswrapper[4769]: I0131 16:47:55.203947 4769 generic.go:334] "Generic (PLEG): container finished" podID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerID="ff3557930426e147241892853f4c090c641291f45893ae5f83ac3ce4fee42006" exitCode=1 Jan 31 16:47:55 crc kubenswrapper[4769]: I0131 16:47:55.204374 4769 generic.go:334] "Generic (PLEG): container finished" podID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerID="d3fe766168d1ddf08c138941ef0f8ded6e6bdd5a57624307702be2b9514d8160" exitCode=1 Jan 31 16:47:55 crc kubenswrapper[4769]: I0131 16:47:55.204394 4769 generic.go:334] "Generic (PLEG): container 
finished" podID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerID="76f118a8e473c81c4af2c11be5dd0c638cafcf645d0ee189702747f4ed43b72d" exitCode=1 Jan 31 16:47:55 crc kubenswrapper[4769]: I0131 16:47:55.204067 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerDied","Data":"ff3557930426e147241892853f4c090c641291f45893ae5f83ac3ce4fee42006"} Jan 31 16:47:55 crc kubenswrapper[4769]: I0131 16:47:55.204462 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerDied","Data":"d3fe766168d1ddf08c138941ef0f8ded6e6bdd5a57624307702be2b9514d8160"} Jan 31 16:47:55 crc kubenswrapper[4769]: I0131 16:47:55.204487 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerDied","Data":"76f118a8e473c81c4af2c11be5dd0c638cafcf645d0ee189702747f4ed43b72d"} Jan 31 16:47:55 crc kubenswrapper[4769]: I0131 16:47:55.204547 4769 scope.go:117] "RemoveContainer" containerID="1df0281136f40468d1df1d849b5642d1ee22146b1e11af6810180df50eac97ac" Jan 31 16:47:55 crc kubenswrapper[4769]: I0131 16:47:55.205685 4769 scope.go:117] "RemoveContainer" containerID="d3fe766168d1ddf08c138941ef0f8ded6e6bdd5a57624307702be2b9514d8160" Jan 31 16:47:55 crc kubenswrapper[4769]: I0131 16:47:55.205981 4769 scope.go:117] "RemoveContainer" containerID="ff3557930426e147241892853f4c090c641291f45893ae5f83ac3ce4fee42006" Jan 31 16:47:55 crc kubenswrapper[4769]: I0131 16:47:55.206201 4769 scope.go:117] "RemoveContainer" containerID="76f118a8e473c81c4af2c11be5dd0c638cafcf645d0ee189702747f4ed43b72d" Jan 31 16:47:55 crc kubenswrapper[4769]: E0131 16:47:55.206763 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 40s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:47:55 crc kubenswrapper[4769]: I0131 16:47:55.279975 4769 scope.go:117] "RemoveContainer" containerID="1969d639137670309947905db392a6df8db778fc5ddcc99a6312f3b13fba09bd" Jan 31 16:47:55 crc kubenswrapper[4769]: I0131 16:47:55.329181 4769 scope.go:117] "RemoveContainer" containerID="7e9d034ea0da4a4318563a74272a4097583bb75421c6d2c785a08de4097471b6" Jan 31 16:47:56 crc kubenswrapper[4769]: I0131 16:47:56.223303 4769 scope.go:117] "RemoveContainer" containerID="d3fe766168d1ddf08c138941ef0f8ded6e6bdd5a57624307702be2b9514d8160" Jan 31 16:47:56 crc kubenswrapper[4769]: I0131 16:47:56.224436 4769 scope.go:117] "RemoveContainer" containerID="ff3557930426e147241892853f4c090c641291f45893ae5f83ac3ce4fee42006" Jan 31 16:47:56 crc kubenswrapper[4769]: I0131 16:47:56.224804 4769 scope.go:117] "RemoveContainer" containerID="76f118a8e473c81c4af2c11be5dd0c638cafcf645d0ee189702747f4ed43b72d" Jan 31 
16:47:56 crc kubenswrapper[4769]: E0131 16:47:56.225599 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 40s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:47:59 crc kubenswrapper[4769]: I0131 16:47:59.709283 4769 scope.go:117] "RemoveContainer" containerID="82b8e532ba4d701b6d414ae29d17a25768fedfd058eb3b0abd62e89c6bd1ceb7" Jan 31 16:47:59 crc kubenswrapper[4769]: I0131 16:47:59.710633 4769 scope.go:117] "RemoveContainer" containerID="411b0835913fe29702f8101824c5797842252de527848b5744db90f91b93fe5d" Jan 31 16:47:59 crc kubenswrapper[4769]: E0131 16:47:59.711145 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 20s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 40s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:48:00 crc kubenswrapper[4769]: I0131 16:48:00.001921 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices\") pod \"swift-ring-rebalance-2sjs2\" (UID: \"54c0116b-a027-4f11-8b6b-aa00778f1acb\") " pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 16:48:00 crc kubenswrapper[4769]: E0131 16:48:00.002173 4769 configmap.go:193] Couldn't get configMap swift-kuttl-tests/swift-ring-config-data: configmap "swift-ring-config-data" not found Jan 31 16:48:00 crc kubenswrapper[4769]: E0131 16:48:00.002307 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices podName:54c0116b-a027-4f11-8b6b-aa00778f1acb nodeName:}" failed. No retries permitted until 2026-01-31 16:49:04.002257722 +0000 UTC m=+1192.076426401 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices") pod "swift-ring-rebalance-2sjs2" (UID: "54c0116b-a027-4f11-8b6b-aa00778f1acb") : configmap "swift-ring-config-data" not found Jan 31 16:48:09 crc kubenswrapper[4769]: I0131 16:48:09.707940 4769 scope.go:117] "RemoveContainer" containerID="d3fe766168d1ddf08c138941ef0f8ded6e6bdd5a57624307702be2b9514d8160" Jan 31 16:48:09 crc kubenswrapper[4769]: I0131 16:48:09.708449 4769 scope.go:117] "RemoveContainer" containerID="ff3557930426e147241892853f4c090c641291f45893ae5f83ac3ce4fee42006" Jan 31 16:48:09 crc kubenswrapper[4769]: I0131 16:48:09.708572 4769 scope.go:117] "RemoveContainer" containerID="76f118a8e473c81c4af2c11be5dd0c638cafcf645d0ee189702747f4ed43b72d" Jan 31 16:48:09 crc kubenswrapper[4769]: E0131 16:48:09.708840 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 40s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:48:10 crc kubenswrapper[4769]: I0131 16:48:10.708999 4769 scope.go:117] "RemoveContainer" containerID="82b8e532ba4d701b6d414ae29d17a25768fedfd058eb3b0abd62e89c6bd1ceb7" Jan 31 16:48:10 crc kubenswrapper[4769]: I0131 16:48:10.709041 4769 scope.go:117] "RemoveContainer" containerID="411b0835913fe29702f8101824c5797842252de527848b5744db90f91b93fe5d" Jan 31 16:48:10 crc kubenswrapper[4769]: E0131 16:48:10.929441 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 40s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:48:11 crc kubenswrapper[4769]: I0131 16:48:11.366411 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerStarted","Data":"fb262fa1f6f7d9e97e398a4b4b057799b7049ced7697df47f43d34dd839f9677"} Jan 31 16:48:11 crc kubenswrapper[4769]: I0131 16:48:11.367079 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:48:11 crc kubenswrapper[4769]: I0131 16:48:11.367282 4769 scope.go:117] "RemoveContainer" containerID="411b0835913fe29702f8101824c5797842252de527848b5744db90f91b93fe5d" Jan 31 16:48:11 crc kubenswrapper[4769]: E0131 16:48:11.367692 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 40s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" 
pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:48:12 crc kubenswrapper[4769]: I0131 16:48:12.377852 4769 scope.go:117] "RemoveContainer" containerID="411b0835913fe29702f8101824c5797842252de527848b5744db90f91b93fe5d" Jan 31 16:48:12 crc kubenswrapper[4769]: E0131 16:48:12.378758 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 40s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:48:16 crc kubenswrapper[4769]: I0131 16:48:16.649986 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:48:17 crc kubenswrapper[4769]: I0131 16:48:17.647553 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:48:20 crc kubenswrapper[4769]: I0131 16:48:20.647815 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:48:20 crc kubenswrapper[4769]: I0131 16:48:20.682410 4769 patch_prober.go:28] interesting pod/machine-config-daemon-4bqbm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 31 16:48:20 crc kubenswrapper[4769]: I0131 16:48:20.682545 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 31 16:48:21 crc kubenswrapper[4769]: I0131 16:48:21.648032 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:48:23 crc kubenswrapper[4769]: I0131 16:48:23.646378 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:48:23 crc kubenswrapper[4769]: I0131 16:48:23.646759 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:48:23 crc kubenswrapper[4769]: I0131 16:48:23.647772 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="proxy-httpd" containerStatusID={"Type":"cri-o","ID":"fb262fa1f6f7d9e97e398a4b4b057799b7049ced7697df47f43d34dd839f9677"} 
pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" containerMessage="Container proxy-httpd failed liveness probe, will be restarted" Jan 31 16:48:23 crc kubenswrapper[4769]: I0131 16:48:23.647809 4769 scope.go:117] "RemoveContainer" containerID="411b0835913fe29702f8101824c5797842252de527848b5744db90f91b93fe5d" Jan 31 16:48:23 crc kubenswrapper[4769]: I0131 16:48:23.647853 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" containerID="cri-o://fb262fa1f6f7d9e97e398a4b4b057799b7049ced7697df47f43d34dd839f9677" gracePeriod=30 Jan 31 16:48:23 crc kubenswrapper[4769]: I0131 16:48:23.648200 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:48:23 crc kubenswrapper[4769]: I0131 16:48:23.708940 4769 scope.go:117] "RemoveContainer" containerID="d3fe766168d1ddf08c138941ef0f8ded6e6bdd5a57624307702be2b9514d8160" Jan 31 16:48:23 crc kubenswrapper[4769]: I0131 16:48:23.709027 4769 scope.go:117] "RemoveContainer" containerID="ff3557930426e147241892853f4c090c641291f45893ae5f83ac3ce4fee42006" Jan 31 16:48:23 crc kubenswrapper[4769]: I0131 16:48:23.709202 4769 scope.go:117] "RemoveContainer" containerID="76f118a8e473c81c4af2c11be5dd0c638cafcf645d0ee189702747f4ed43b72d" Jan 31 16:48:23 crc kubenswrapper[4769]: E0131 16:48:23.709626 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 40s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:48:23 crc kubenswrapper[4769]: E0131 16:48:23.932743 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 40s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:48:24 crc kubenswrapper[4769]: I0131 16:48:24.490038 4769 generic.go:334] "Generic (PLEG): container finished" podID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerID="fb262fa1f6f7d9e97e398a4b4b057799b7049ced7697df47f43d34dd839f9677" exitCode=0 Jan 31 16:48:24 crc kubenswrapper[4769]: I0131 16:48:24.490123 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerDied","Data":"fb262fa1f6f7d9e97e398a4b4b057799b7049ced7697df47f43d34dd839f9677"} Jan 31 16:48:24 crc kubenswrapper[4769]: I0131 16:48:24.490386 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerStarted","Data":"c019bdb47bd8b34c4065040df221db97c47cb546f7ae8fbc2c9a32ca1a3b4550"} Jan 31 16:48:24 crc kubenswrapper[4769]: I0131 16:48:24.490410 4769 scope.go:117] "RemoveContainer" containerID="82b8e532ba4d701b6d414ae29d17a25768fedfd058eb3b0abd62e89c6bd1ceb7" Jan 31 16:48:24 crc kubenswrapper[4769]: I0131 16:48:24.490537 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:48:24 crc kubenswrapper[4769]: I0131 16:48:24.490885 4769 scope.go:117] "RemoveContainer" containerID="fb262fa1f6f7d9e97e398a4b4b057799b7049ced7697df47f43d34dd839f9677" Jan 31 16:48:24 crc kubenswrapper[4769]: E0131 16:48:24.491083 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 40s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:48:25 crc kubenswrapper[4769]: I0131 16:48:25.499535 4769 generic.go:334] "Generic (PLEG): container finished" podID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerID="c019bdb47bd8b34c4065040df221db97c47cb546f7ae8fbc2c9a32ca1a3b4550" exitCode=1 Jan 31 16:48:25 crc kubenswrapper[4769]: I0131 16:48:25.499578 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerDied","Data":"c019bdb47bd8b34c4065040df221db97c47cb546f7ae8fbc2c9a32ca1a3b4550"} Jan 31 16:48:25 crc kubenswrapper[4769]: I0131 16:48:25.499611 4769 scope.go:117] "RemoveContainer" containerID="411b0835913fe29702f8101824c5797842252de527848b5744db90f91b93fe5d" Jan 31 16:48:25 crc kubenswrapper[4769]: I0131 16:48:25.500286 4769 scope.go:117] "RemoveContainer" containerID="fb262fa1f6f7d9e97e398a4b4b057799b7049ced7697df47f43d34dd839f9677" Jan 31 16:48:25 crc kubenswrapper[4769]: I0131 16:48:25.500318 4769 scope.go:117] "RemoveContainer" containerID="c019bdb47bd8b34c4065040df221db97c47cb546f7ae8fbc2c9a32ca1a3b4550" Jan 31 16:48:25 crc kubenswrapper[4769]: E0131 16:48:25.502107 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 40s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:48:26 crc kubenswrapper[4769]: I0131 16:48:26.516224 4769 scope.go:117] "RemoveContainer" containerID="fb262fa1f6f7d9e97e398a4b4b057799b7049ced7697df47f43d34dd839f9677" Jan 31 16:48:26 crc kubenswrapper[4769]: I0131 16:48:26.516267 4769 scope.go:117] "RemoveContainer" containerID="c019bdb47bd8b34c4065040df221db97c47cb546f7ae8fbc2c9a32ca1a3b4550" Jan 31 16:48:26 crc kubenswrapper[4769]: E0131 16:48:26.516678 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 40s restarting 
failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:48:26 crc kubenswrapper[4769]: I0131 16:48:26.644929 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:48:27 crc kubenswrapper[4769]: I0131 16:48:27.525228 4769 scope.go:117] "RemoveContainer" containerID="fb262fa1f6f7d9e97e398a4b4b057799b7049ced7697df47f43d34dd839f9677" Jan 31 16:48:27 crc kubenswrapper[4769]: I0131 16:48:27.525279 4769 scope.go:117] "RemoveContainer" containerID="c019bdb47bd8b34c4065040df221db97c47cb546f7ae8fbc2c9a32ca1a3b4550" Jan 31 16:48:27 crc kubenswrapper[4769]: E0131 16:48:27.525693 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 40s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:48:38 crc kubenswrapper[4769]: I0131 16:48:38.708437 4769 scope.go:117] "RemoveContainer" containerID="d3fe766168d1ddf08c138941ef0f8ded6e6bdd5a57624307702be2b9514d8160" Jan 31 16:48:38 crc kubenswrapper[4769]: I0131 16:48:38.710447 4769 scope.go:117] "RemoveContainer" containerID="ff3557930426e147241892853f4c090c641291f45893ae5f83ac3ce4fee42006" Jan 31 16:48:38 crc kubenswrapper[4769]: I0131 16:48:38.710776 4769 scope.go:117] "RemoveContainer" containerID="76f118a8e473c81c4af2c11be5dd0c638cafcf645d0ee189702747f4ed43b72d" Jan 31 16:48:39 crc kubenswrapper[4769]: I0131 16:48:39.651809 4769 generic.go:334] "Generic (PLEG): container finished" podID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerID="ec841d4703906bc361c06a04280f41431d95316bf904b3875695ae6231d07851" exitCode=1 Jan 31 16:48:39 crc kubenswrapper[4769]: I0131 16:48:39.652274 4769 generic.go:334] "Generic (PLEG): container finished" podID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerID="210ebb4cb17b0455695b2214255395c978617669e044617718c5b601b1756117" exitCode=1 Jan 31 16:48:39 crc kubenswrapper[4769]: I0131 16:48:39.651889 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerStarted","Data":"3709a01503194a9a5539597f10f66fcda8b408815ad56637ea81a459236ee06b"} Jan 31 16:48:39 crc kubenswrapper[4769]: I0131 16:48:39.652318 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerDied","Data":"ec841d4703906bc361c06a04280f41431d95316bf904b3875695ae6231d07851"} Jan 31 16:48:39 crc kubenswrapper[4769]: I0131 16:48:39.652340 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" 
event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerDied","Data":"210ebb4cb17b0455695b2214255395c978617669e044617718c5b601b1756117"} Jan 31 16:48:39 crc kubenswrapper[4769]: I0131 16:48:39.652362 4769 scope.go:117] "RemoveContainer" containerID="ff3557930426e147241892853f4c090c641291f45893ae5f83ac3ce4fee42006" Jan 31 16:48:39 crc kubenswrapper[4769]: I0131 16:48:39.653136 4769 scope.go:117] "RemoveContainer" containerID="210ebb4cb17b0455695b2214255395c978617669e044617718c5b601b1756117" Jan 31 16:48:39 crc kubenswrapper[4769]: I0131 16:48:39.653252 4769 scope.go:117] "RemoveContainer" containerID="ec841d4703906bc361c06a04280f41431d95316bf904b3875695ae6231d07851" Jan 31 16:48:39 crc kubenswrapper[4769]: E0131 16:48:39.653778 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:48:39 crc kubenswrapper[4769]: I0131 16:48:39.705411 4769 scope.go:117] "RemoveContainer" containerID="d3fe766168d1ddf08c138941ef0f8ded6e6bdd5a57624307702be2b9514d8160" Jan 31 16:48:39 crc kubenswrapper[4769]: I0131 16:48:39.707889 4769 scope.go:117] "RemoveContainer" containerID="fb262fa1f6f7d9e97e398a4b4b057799b7049ced7697df47f43d34dd839f9677" Jan 31 16:48:39 crc kubenswrapper[4769]: I0131 16:48:39.707922 4769 scope.go:117] "RemoveContainer" containerID="c019bdb47bd8b34c4065040df221db97c47cb546f7ae8fbc2c9a32ca1a3b4550" Jan 31 16:48:39 crc kubenswrapper[4769]: E0131 16:48:39.708290 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 40s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:48:40 crc kubenswrapper[4769]: I0131 16:48:40.685837 4769 generic.go:334] "Generic (PLEG): container finished" podID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerID="3709a01503194a9a5539597f10f66fcda8b408815ad56637ea81a459236ee06b" exitCode=1 Jan 31 16:48:40 crc kubenswrapper[4769]: I0131 16:48:40.685899 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerDied","Data":"3709a01503194a9a5539597f10f66fcda8b408815ad56637ea81a459236ee06b"} Jan 31 16:48:40 crc kubenswrapper[4769]: I0131 16:48:40.685950 4769 scope.go:117] "RemoveContainer" containerID="76f118a8e473c81c4af2c11be5dd0c638cafcf645d0ee189702747f4ed43b72d" Jan 31 16:48:40 crc kubenswrapper[4769]: I0131 16:48:40.686837 4769 scope.go:117] "RemoveContainer" containerID="210ebb4cb17b0455695b2214255395c978617669e044617718c5b601b1756117" Jan 31 16:48:40 crc kubenswrapper[4769]: I0131 16:48:40.686953 4769 scope.go:117] 
"RemoveContainer" containerID="ec841d4703906bc361c06a04280f41431d95316bf904b3875695ae6231d07851" Jan 31 16:48:40 crc kubenswrapper[4769]: I0131 16:48:40.687140 4769 scope.go:117] "RemoveContainer" containerID="3709a01503194a9a5539597f10f66fcda8b408815ad56637ea81a459236ee06b" Jan 31 16:48:40 crc kubenswrapper[4769]: E0131 16:48:40.687773 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:48:45 crc kubenswrapper[4769]: I0131 16:48:45.735115 4769 generic.go:334] "Generic (PLEG): container finished" podID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerID="09b7562c3d466a41880f5909b6af118ad218111a5b099f6f918545032b3764fa" exitCode=1 Jan 31 16:48:45 crc kubenswrapper[4769]: I0131 16:48:45.735167 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerDied","Data":"09b7562c3d466a41880f5909b6af118ad218111a5b099f6f918545032b3764fa"} Jan 31 16:48:45 crc kubenswrapper[4769]: I0131 16:48:45.737559 4769 scope.go:117] "RemoveContainer" containerID="210ebb4cb17b0455695b2214255395c978617669e044617718c5b601b1756117" Jan 31 16:48:45 crc kubenswrapper[4769]: I0131 16:48:45.737696 4769 scope.go:117] "RemoveContainer" containerID="ec841d4703906bc361c06a04280f41431d95316bf904b3875695ae6231d07851" Jan 31 16:48:45 crc kubenswrapper[4769]: I0131 16:48:45.737773 4769 scope.go:117] "RemoveContainer" containerID="09b7562c3d466a41880f5909b6af118ad218111a5b099f6f918545032b3764fa" Jan 31 16:48:45 crc kubenswrapper[4769]: I0131 16:48:45.737905 4769 scope.go:117] "RemoveContainer" containerID="3709a01503194a9a5539597f10f66fcda8b408815ad56637ea81a459236ee06b" Jan 31 16:48:45 crc kubenswrapper[4769]: E0131 16:48:45.883139 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:48:46 crc kubenswrapper[4769]: I0131 16:48:46.750664 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" 
event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerStarted","Data":"14e13a74ac424327d602e111f611c13f87518391992630077fd51df1b2cb4990"} Jan 31 16:48:46 crc kubenswrapper[4769]: I0131 16:48:46.751280 4769 scope.go:117] "RemoveContainer" containerID="210ebb4cb17b0455695b2214255395c978617669e044617718c5b601b1756117" Jan 31 16:48:46 crc kubenswrapper[4769]: I0131 16:48:46.751353 4769 scope.go:117] "RemoveContainer" containerID="ec841d4703906bc361c06a04280f41431d95316bf904b3875695ae6231d07851" Jan 31 16:48:46 crc kubenswrapper[4769]: I0131 16:48:46.751477 4769 scope.go:117] "RemoveContainer" containerID="3709a01503194a9a5539597f10f66fcda8b408815ad56637ea81a459236ee06b" Jan 31 16:48:46 crc kubenswrapper[4769]: E0131 16:48:46.751835 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:48:50 crc kubenswrapper[4769]: I0131 16:48:50.682444 4769 patch_prober.go:28] interesting pod/machine-config-daemon-4bqbm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 31 16:48:50 crc kubenswrapper[4769]: I0131 16:48:50.682810 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 31 16:48:50 crc kubenswrapper[4769]: I0131 16:48:50.682861 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" Jan 31 16:48:50 crc kubenswrapper[4769]: I0131 16:48:50.683409 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2affff92918addd5ee0e3565d5ea4c6af01f170b7bf40a5a6a676c61598fac76"} pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 31 16:48:50 crc kubenswrapper[4769]: I0131 16:48:50.683473 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" containerID="cri-o://2affff92918addd5ee0e3565d5ea4c6af01f170b7bf40a5a6a676c61598fac76" gracePeriod=600 Jan 31 16:48:51 crc kubenswrapper[4769]: I0131 16:48:51.800166 4769 generic.go:334] "Generic (PLEG): container finished" podID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" 
containerID="2affff92918addd5ee0e3565d5ea4c6af01f170b7bf40a5a6a676c61598fac76" exitCode=0 Jan 31 16:48:51 crc kubenswrapper[4769]: I0131 16:48:51.800229 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" event={"ID":"1d352f75-43f7-4b8c-867e-cfb17bbbe011","Type":"ContainerDied","Data":"2affff92918addd5ee0e3565d5ea4c6af01f170b7bf40a5a6a676c61598fac76"} Jan 31 16:48:51 crc kubenswrapper[4769]: I0131 16:48:51.800954 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" event={"ID":"1d352f75-43f7-4b8c-867e-cfb17bbbe011","Type":"ContainerStarted","Data":"3148c4c5005ab898a2a4ce40c3987f2918283183525c20480b00949fa02629ba"} Jan 31 16:48:51 crc kubenswrapper[4769]: I0131 16:48:51.800991 4769 scope.go:117] "RemoveContainer" containerID="54d76b4d5009a4a563cf1e37ee7df5b71a49cb0937af68d18db56c67eb23639a" Jan 31 16:48:53 crc kubenswrapper[4769]: I0131 16:48:53.825657 4769 generic.go:334] "Generic (PLEG): container finished" podID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerID="30a79b373b594fc90eeefd92adc4112a9a9bf07fda7fbc1c0b65d00fc6b49abf" exitCode=1 Jan 31 16:48:53 crc kubenswrapper[4769]: I0131 16:48:53.825693 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerDied","Data":"30a79b373b594fc90eeefd92adc4112a9a9bf07fda7fbc1c0b65d00fc6b49abf"} Jan 31 16:48:53 crc kubenswrapper[4769]: I0131 16:48:53.826616 4769 scope.go:117] "RemoveContainer" containerID="210ebb4cb17b0455695b2214255395c978617669e044617718c5b601b1756117" Jan 31 16:48:53 crc kubenswrapper[4769]: I0131 16:48:53.826667 4769 scope.go:117] "RemoveContainer" containerID="ec841d4703906bc361c06a04280f41431d95316bf904b3875695ae6231d07851" Jan 31 16:48:53 crc kubenswrapper[4769]: I0131 16:48:53.826736 4769 scope.go:117] "RemoveContainer" containerID="30a79b373b594fc90eeefd92adc4112a9a9bf07fda7fbc1c0b65d00fc6b49abf" Jan 31 16:48:53 crc kubenswrapper[4769]: I0131 16:48:53.826751 4769 scope.go:117] "RemoveContainer" containerID="3709a01503194a9a5539597f10f66fcda8b408815ad56637ea81a459236ee06b" Jan 31 16:48:54 crc kubenswrapper[4769]: E0131 16:48:54.047035 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:48:54 crc kubenswrapper[4769]: I0131 16:48:54.708299 4769 scope.go:117] "RemoveContainer" containerID="fb262fa1f6f7d9e97e398a4b4b057799b7049ced7697df47f43d34dd839f9677" Jan 31 16:48:54 crc kubenswrapper[4769]: I0131 16:48:54.708341 4769 scope.go:117] "RemoveContainer" containerID="c019bdb47bd8b34c4065040df221db97c47cb546f7ae8fbc2c9a32ca1a3b4550" Jan 31 16:48:54 crc kubenswrapper[4769]: E0131 16:48:54.708749 4769 pod_workers.go:1301] "Error 
syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 40s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:48:54 crc kubenswrapper[4769]: I0131 16:48:54.843252 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerStarted","Data":"f2b74b0817580bbf8dee46673071db4df57036b13648d57687a8891be838e442"} Jan 31 16:48:54 crc kubenswrapper[4769]: I0131 16:48:54.845223 4769 scope.go:117] "RemoveContainer" containerID="210ebb4cb17b0455695b2214255395c978617669e044617718c5b601b1756117" Jan 31 16:48:54 crc kubenswrapper[4769]: I0131 16:48:54.845405 4769 scope.go:117] "RemoveContainer" containerID="ec841d4703906bc361c06a04280f41431d95316bf904b3875695ae6231d07851" Jan 31 16:48:54 crc kubenswrapper[4769]: I0131 16:48:54.845708 4769 scope.go:117] "RemoveContainer" containerID="3709a01503194a9a5539597f10f66fcda8b408815ad56637ea81a459236ee06b" Jan 31 16:48:54 crc kubenswrapper[4769]: E0131 16:48:54.846399 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:48:59 crc kubenswrapper[4769]: E0131 16:48:59.047961 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ring-data-devices], unattached volumes=[], failed to process volumes=[]: context deadline exceeded" pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" podUID="54c0116b-a027-4f11-8b6b-aa00778f1acb" Jan 31 16:48:59 crc kubenswrapper[4769]: I0131 16:48:59.887730 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 16:49:04 crc kubenswrapper[4769]: I0131 16:49:04.016004 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices\") pod \"swift-ring-rebalance-2sjs2\" (UID: \"54c0116b-a027-4f11-8b6b-aa00778f1acb\") " pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 16:49:04 crc kubenswrapper[4769]: E0131 16:49:04.016210 4769 configmap.go:193] Couldn't get configMap swift-kuttl-tests/swift-ring-config-data: configmap "swift-ring-config-data" not found Jan 31 16:49:04 crc kubenswrapper[4769]: E0131 16:49:04.017947 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices podName:54c0116b-a027-4f11-8b6b-aa00778f1acb nodeName:}" failed. No retries permitted until 2026-01-31 16:51:06.017912929 +0000 UTC m=+1314.092081598 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices") pod "swift-ring-rebalance-2sjs2" (UID: "54c0116b-a027-4f11-8b6b-aa00778f1acb") : configmap "swift-ring-config-data" not found Jan 31 16:49:06 crc kubenswrapper[4769]: I0131 16:49:06.709881 4769 scope.go:117] "RemoveContainer" containerID="210ebb4cb17b0455695b2214255395c978617669e044617718c5b601b1756117" Jan 31 16:49:06 crc kubenswrapper[4769]: I0131 16:49:06.710428 4769 scope.go:117] "RemoveContainer" containerID="ec841d4703906bc361c06a04280f41431d95316bf904b3875695ae6231d07851" Jan 31 16:49:06 crc kubenswrapper[4769]: I0131 16:49:06.710671 4769 scope.go:117] "RemoveContainer" containerID="3709a01503194a9a5539597f10f66fcda8b408815ad56637ea81a459236ee06b" Jan 31 16:49:06 crc kubenswrapper[4769]: E0131 16:49:06.711280 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:49:08 crc kubenswrapper[4769]: I0131 16:49:08.708171 4769 scope.go:117] "RemoveContainer" containerID="fb262fa1f6f7d9e97e398a4b4b057799b7049ced7697df47f43d34dd839f9677" Jan 31 16:49:08 crc kubenswrapper[4769]: I0131 16:49:08.708596 4769 scope.go:117] "RemoveContainer" containerID="c019bdb47bd8b34c4065040df221db97c47cb546f7ae8fbc2c9a32ca1a3b4550" Jan 31 16:49:08 crc kubenswrapper[4769]: E0131 16:49:08.873093 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" 
Jan 31 16:49:08 crc kubenswrapper[4769]: I0131 16:49:08.962323 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerStarted","Data":"4f3e736a2db9b563f3db8d17a4bab1c2f337d7ba0646fa64024a653ce3113175"} Jan 31 16:49:08 crc kubenswrapper[4769]: I0131 16:49:08.962646 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:49:08 crc kubenswrapper[4769]: I0131 16:49:08.962960 4769 scope.go:117] "RemoveContainer" containerID="c019bdb47bd8b34c4065040df221db97c47cb546f7ae8fbc2c9a32ca1a3b4550" Jan 31 16:49:08 crc kubenswrapper[4769]: E0131 16:49:08.963168 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:49:09 crc kubenswrapper[4769]: I0131 16:49:09.971099 4769 scope.go:117] "RemoveContainer" containerID="c019bdb47bd8b34c4065040df221db97c47cb546f7ae8fbc2c9a32ca1a3b4550" Jan 31 16:49:09 crc kubenswrapper[4769]: E0131 16:49:09.971583 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:49:13 crc kubenswrapper[4769]: I0131 16:49:13.261435 4769 scope.go:117] "RemoveContainer" containerID="ec01bd28589e76f9acc0d234d7fed5527e60db8aa488775f8e021c00a9fb3380" Jan 31 16:49:13 crc kubenswrapper[4769]: I0131 16:49:13.297052 4769 scope.go:117] "RemoveContainer" containerID="ab76025b5acf3e4d83ec95492415472ad79a514afdd7aa97016d5679d5e3f3b7" Jan 31 16:49:13 crc kubenswrapper[4769]: I0131 16:49:13.320084 4769 scope.go:117] "RemoveContainer" containerID="4a4dde229a2a4fbcf6f788bf971fa08b794a2a29108fa0e5a845d76819f27644" Jan 31 16:49:14 crc kubenswrapper[4769]: I0131 16:49:14.651001 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:49:16 crc kubenswrapper[4769]: I0131 16:49:16.653162 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:49:17 crc kubenswrapper[4769]: I0131 16:49:17.646299 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:49:20 crc kubenswrapper[4769]: I0131 16:49:20.647843 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 
503" Jan 31 16:49:20 crc kubenswrapper[4769]: I0131 16:49:20.648201 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:49:20 crc kubenswrapper[4769]: I0131 16:49:20.648871 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="proxy-httpd" containerStatusID={"Type":"cri-o","ID":"4f3e736a2db9b563f3db8d17a4bab1c2f337d7ba0646fa64024a653ce3113175"} pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" containerMessage="Container proxy-httpd failed liveness probe, will be restarted" Jan 31 16:49:20 crc kubenswrapper[4769]: I0131 16:49:20.648895 4769 scope.go:117] "RemoveContainer" containerID="c019bdb47bd8b34c4065040df221db97c47cb546f7ae8fbc2c9a32ca1a3b4550" Jan 31 16:49:20 crc kubenswrapper[4769]: I0131 16:49:20.648932 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" containerID="cri-o://4f3e736a2db9b563f3db8d17a4bab1c2f337d7ba0646fa64024a653ce3113175" gracePeriod=30 Jan 31 16:49:20 crc kubenswrapper[4769]: I0131 16:49:20.650067 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:49:20 crc kubenswrapper[4769]: E0131 16:49:20.779996 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:49:21 crc kubenswrapper[4769]: I0131 16:49:21.064325 4769 generic.go:334] "Generic (PLEG): container finished" podID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerID="4f3e736a2db9b563f3db8d17a4bab1c2f337d7ba0646fa64024a653ce3113175" exitCode=0 Jan 31 16:49:21 crc kubenswrapper[4769]: I0131 16:49:21.064407 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerDied","Data":"4f3e736a2db9b563f3db8d17a4bab1c2f337d7ba0646fa64024a653ce3113175"} Jan 31 16:49:21 crc kubenswrapper[4769]: I0131 16:49:21.064912 4769 scope.go:117] "RemoveContainer" containerID="fb262fa1f6f7d9e97e398a4b4b057799b7049ced7697df47f43d34dd839f9677" Jan 31 16:49:21 crc kubenswrapper[4769]: I0131 16:49:21.065753 4769 scope.go:117] "RemoveContainer" containerID="4f3e736a2db9b563f3db8d17a4bab1c2f337d7ba0646fa64024a653ce3113175" Jan 31 16:49:21 crc kubenswrapper[4769]: I0131 16:49:21.065799 4769 scope.go:117] "RemoveContainer" containerID="c019bdb47bd8b34c4065040df221db97c47cb546f7ae8fbc2c9a32ca1a3b4550" Jan 31 16:49:21 crc kubenswrapper[4769]: E0131 16:49:21.066051 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=proxy-httpd 
pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:49:21 crc kubenswrapper[4769]: I0131 16:49:21.709019 4769 scope.go:117] "RemoveContainer" containerID="210ebb4cb17b0455695b2214255395c978617669e044617718c5b601b1756117" Jan 31 16:49:21 crc kubenswrapper[4769]: I0131 16:49:21.709144 4769 scope.go:117] "RemoveContainer" containerID="ec841d4703906bc361c06a04280f41431d95316bf904b3875695ae6231d07851" Jan 31 16:49:21 crc kubenswrapper[4769]: I0131 16:49:21.709357 4769 scope.go:117] "RemoveContainer" containerID="3709a01503194a9a5539597f10f66fcda8b408815ad56637ea81a459236ee06b" Jan 31 16:49:21 crc kubenswrapper[4769]: E0131 16:49:21.709877 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:49:35 crc kubenswrapper[4769]: I0131 16:49:35.709235 4769 scope.go:117] "RemoveContainer" containerID="210ebb4cb17b0455695b2214255395c978617669e044617718c5b601b1756117" Jan 31 16:49:35 crc kubenswrapper[4769]: I0131 16:49:35.709890 4769 scope.go:117] "RemoveContainer" containerID="ec841d4703906bc361c06a04280f41431d95316bf904b3875695ae6231d07851" Jan 31 16:49:35 crc kubenswrapper[4769]: I0131 16:49:35.709965 4769 scope.go:117] "RemoveContainer" containerID="4f3e736a2db9b563f3db8d17a4bab1c2f337d7ba0646fa64024a653ce3113175" Jan 31 16:49:35 crc kubenswrapper[4769]: I0131 16:49:35.710002 4769 scope.go:117] "RemoveContainer" containerID="c019bdb47bd8b34c4065040df221db97c47cb546f7ae8fbc2c9a32ca1a3b4550" Jan 31 16:49:35 crc kubenswrapper[4769]: I0131 16:49:35.710026 4769 scope.go:117] "RemoveContainer" containerID="3709a01503194a9a5539597f10f66fcda8b408815ad56637ea81a459236ee06b" Jan 31 16:49:35 crc kubenswrapper[4769]: E0131 16:49:35.710382 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:49:35 crc kubenswrapper[4769]: E0131 16:49:35.710486 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for 
\"account-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:49:46 crc kubenswrapper[4769]: I0131 16:49:46.709603 4769 scope.go:117] "RemoveContainer" containerID="210ebb4cb17b0455695b2214255395c978617669e044617718c5b601b1756117" Jan 31 16:49:46 crc kubenswrapper[4769]: I0131 16:49:46.711909 4769 scope.go:117] "RemoveContainer" containerID="ec841d4703906bc361c06a04280f41431d95316bf904b3875695ae6231d07851" Jan 31 16:49:46 crc kubenswrapper[4769]: I0131 16:49:46.712104 4769 scope.go:117] "RemoveContainer" containerID="3709a01503194a9a5539597f10f66fcda8b408815ad56637ea81a459236ee06b" Jan 31 16:49:46 crc kubenswrapper[4769]: E0131 16:49:46.712738 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:49:50 crc kubenswrapper[4769]: I0131 16:49:50.708200 4769 scope.go:117] "RemoveContainer" containerID="4f3e736a2db9b563f3db8d17a4bab1c2f337d7ba0646fa64024a653ce3113175" Jan 31 16:49:50 crc kubenswrapper[4769]: I0131 16:49:50.708589 4769 scope.go:117] "RemoveContainer" containerID="c019bdb47bd8b34c4065040df221db97c47cb546f7ae8fbc2c9a32ca1a3b4550" Jan 31 16:49:50 crc kubenswrapper[4769]: E0131 16:49:50.885563 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:49:51 crc kubenswrapper[4769]: I0131 16:49:51.323631 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerStarted","Data":"5803c1dff1ed7f6bf06d14f47ec0fe2b2c11ff79e8b673019de6c2c00e1b9a02"} Jan 31 16:49:51 crc kubenswrapper[4769]: I0131 16:49:51.324369 4769 scope.go:117] "RemoveContainer" containerID="4f3e736a2db9b563f3db8d17a4bab1c2f337d7ba0646fa64024a653ce3113175" Jan 31 16:49:51 crc kubenswrapper[4769]: E0131 16:49:51.324686 4769 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:49:51 crc kubenswrapper[4769]: I0131 16:49:51.324920 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:49:52 crc kubenswrapper[4769]: I0131 16:49:52.337957 4769 generic.go:334] "Generic (PLEG): container finished" podID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerID="5803c1dff1ed7f6bf06d14f47ec0fe2b2c11ff79e8b673019de6c2c00e1b9a02" exitCode=1 Jan 31 16:49:52 crc kubenswrapper[4769]: I0131 16:49:52.338069 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerDied","Data":"5803c1dff1ed7f6bf06d14f47ec0fe2b2c11ff79e8b673019de6c2c00e1b9a02"} Jan 31 16:49:52 crc kubenswrapper[4769]: I0131 16:49:52.338149 4769 scope.go:117] "RemoveContainer" containerID="c019bdb47bd8b34c4065040df221db97c47cb546f7ae8fbc2c9a32ca1a3b4550" Jan 31 16:49:52 crc kubenswrapper[4769]: I0131 16:49:52.340261 4769 scope.go:117] "RemoveContainer" containerID="4f3e736a2db9b563f3db8d17a4bab1c2f337d7ba0646fa64024a653ce3113175" Jan 31 16:49:52 crc kubenswrapper[4769]: I0131 16:49:52.340302 4769 scope.go:117] "RemoveContainer" containerID="5803c1dff1ed7f6bf06d14f47ec0fe2b2c11ff79e8b673019de6c2c00e1b9a02" Jan 31 16:49:52 crc kubenswrapper[4769]: E0131 16:49:52.346134 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:49:53 crc kubenswrapper[4769]: I0131 16:49:53.372738 4769 scope.go:117] "RemoveContainer" containerID="4f3e736a2db9b563f3db8d17a4bab1c2f337d7ba0646fa64024a653ce3113175" Jan 31 16:49:53 crc kubenswrapper[4769]: I0131 16:49:53.373194 4769 scope.go:117] "RemoveContainer" containerID="5803c1dff1ed7f6bf06d14f47ec0fe2b2c11ff79e8b673019de6c2c00e1b9a02" Jan 31 16:49:53 crc kubenswrapper[4769]: E0131 16:49:53.373600 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:49:53 crc kubenswrapper[4769]: I0131 16:49:53.644869 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:49:54 crc kubenswrapper[4769]: 
I0131 16:49:54.380761 4769 scope.go:117] "RemoveContainer" containerID="4f3e736a2db9b563f3db8d17a4bab1c2f337d7ba0646fa64024a653ce3113175" Jan 31 16:49:54 crc kubenswrapper[4769]: I0131 16:49:54.380813 4769 scope.go:117] "RemoveContainer" containerID="5803c1dff1ed7f6bf06d14f47ec0fe2b2c11ff79e8b673019de6c2c00e1b9a02" Jan 31 16:49:54 crc kubenswrapper[4769]: E0131 16:49:54.381279 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:49:58 crc kubenswrapper[4769]: I0131 16:49:58.715327 4769 scope.go:117] "RemoveContainer" containerID="210ebb4cb17b0455695b2214255395c978617669e044617718c5b601b1756117" Jan 31 16:49:58 crc kubenswrapper[4769]: I0131 16:49:58.716424 4769 scope.go:117] "RemoveContainer" containerID="ec841d4703906bc361c06a04280f41431d95316bf904b3875695ae6231d07851" Jan 31 16:49:58 crc kubenswrapper[4769]: I0131 16:49:58.716925 4769 scope.go:117] "RemoveContainer" containerID="3709a01503194a9a5539597f10f66fcda8b408815ad56637ea81a459236ee06b" Jan 31 16:49:58 crc kubenswrapper[4769]: E0131 16:49:58.718274 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:50:09 crc kubenswrapper[4769]: I0131 16:50:09.708808 4769 scope.go:117] "RemoveContainer" containerID="4f3e736a2db9b563f3db8d17a4bab1c2f337d7ba0646fa64024a653ce3113175" Jan 31 16:50:09 crc kubenswrapper[4769]: I0131 16:50:09.709428 4769 scope.go:117] "RemoveContainer" containerID="5803c1dff1ed7f6bf06d14f47ec0fe2b2c11ff79e8b673019de6c2c00e1b9a02" Jan 31 16:50:09 crc kubenswrapper[4769]: E0131 16:50:09.709906 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:50:12 crc kubenswrapper[4769]: I0131 16:50:12.716600 4769 scope.go:117] "RemoveContainer" 
containerID="210ebb4cb17b0455695b2214255395c978617669e044617718c5b601b1756117" Jan 31 16:50:12 crc kubenswrapper[4769]: I0131 16:50:12.716915 4769 scope.go:117] "RemoveContainer" containerID="ec841d4703906bc361c06a04280f41431d95316bf904b3875695ae6231d07851" Jan 31 16:50:12 crc kubenswrapper[4769]: I0131 16:50:12.717028 4769 scope.go:117] "RemoveContainer" containerID="3709a01503194a9a5539597f10f66fcda8b408815ad56637ea81a459236ee06b" Jan 31 16:50:13 crc kubenswrapper[4769]: I0131 16:50:13.589378 4769 generic.go:334] "Generic (PLEG): container finished" podID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerID="3d82bfccbbfd48f342623eaefff8053c663d95bc8da0e0dda99c124952665791" exitCode=1 Jan 31 16:50:13 crc kubenswrapper[4769]: I0131 16:50:13.589674 4769 generic.go:334] "Generic (PLEG): container finished" podID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerID="6a79b7690071072023a0b7227ba746e7c742721d924455e4e2edb176c938ed75" exitCode=1 Jan 31 16:50:13 crc kubenswrapper[4769]: I0131 16:50:13.589526 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerStarted","Data":"27d9f71990a770d8924f8e4d469dd64f5b068309535b1ae6ecb4774de3aba0a6"} Jan 31 16:50:13 crc kubenswrapper[4769]: I0131 16:50:13.589710 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerDied","Data":"3d82bfccbbfd48f342623eaefff8053c663d95bc8da0e0dda99c124952665791"} Jan 31 16:50:13 crc kubenswrapper[4769]: I0131 16:50:13.589728 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerDied","Data":"6a79b7690071072023a0b7227ba746e7c742721d924455e4e2edb176c938ed75"} Jan 31 16:50:13 crc kubenswrapper[4769]: I0131 16:50:13.589748 4769 scope.go:117] "RemoveContainer" containerID="ec841d4703906bc361c06a04280f41431d95316bf904b3875695ae6231d07851" Jan 31 16:50:13 crc kubenswrapper[4769]: I0131 16:50:13.590777 4769 scope.go:117] "RemoveContainer" containerID="6a79b7690071072023a0b7227ba746e7c742721d924455e4e2edb176c938ed75" Jan 31 16:50:13 crc kubenswrapper[4769]: I0131 16:50:13.590921 4769 scope.go:117] "RemoveContainer" containerID="3d82bfccbbfd48f342623eaefff8053c663d95bc8da0e0dda99c124952665791" Jan 31 16:50:13 crc kubenswrapper[4769]: E0131 16:50:13.591681 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:50:13 crc kubenswrapper[4769]: I0131 16:50:13.659025 4769 scope.go:117] "RemoveContainer" containerID="210ebb4cb17b0455695b2214255395c978617669e044617718c5b601b1756117" Jan 31 16:50:14 crc kubenswrapper[4769]: I0131 16:50:14.611557 4769 generic.go:334] "Generic (PLEG): container finished" podID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerID="27d9f71990a770d8924f8e4d469dd64f5b068309535b1ae6ecb4774de3aba0a6" exitCode=1 Jan 31 16:50:14 crc kubenswrapper[4769]: I0131 
16:50:14.611610 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerDied","Data":"27d9f71990a770d8924f8e4d469dd64f5b068309535b1ae6ecb4774de3aba0a6"} Jan 31 16:50:14 crc kubenswrapper[4769]: I0131 16:50:14.611877 4769 scope.go:117] "RemoveContainer" containerID="3709a01503194a9a5539597f10f66fcda8b408815ad56637ea81a459236ee06b" Jan 31 16:50:14 crc kubenswrapper[4769]: I0131 16:50:14.612479 4769 scope.go:117] "RemoveContainer" containerID="6a79b7690071072023a0b7227ba746e7c742721d924455e4e2edb176c938ed75" Jan 31 16:50:14 crc kubenswrapper[4769]: I0131 16:50:14.612623 4769 scope.go:117] "RemoveContainer" containerID="3d82bfccbbfd48f342623eaefff8053c663d95bc8da0e0dda99c124952665791" Jan 31 16:50:14 crc kubenswrapper[4769]: I0131 16:50:14.612803 4769 scope.go:117] "RemoveContainer" containerID="27d9f71990a770d8924f8e4d469dd64f5b068309535b1ae6ecb4774de3aba0a6" Jan 31 16:50:14 crc kubenswrapper[4769]: E0131 16:50:14.613353 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:50:15 crc kubenswrapper[4769]: I0131 16:50:15.632672 4769 scope.go:117] "RemoveContainer" containerID="6a79b7690071072023a0b7227ba746e7c742721d924455e4e2edb176c938ed75" Jan 31 16:50:15 crc kubenswrapper[4769]: I0131 16:50:15.632792 4769 scope.go:117] "RemoveContainer" containerID="3d82bfccbbfd48f342623eaefff8053c663d95bc8da0e0dda99c124952665791" Jan 31 16:50:15 crc kubenswrapper[4769]: I0131 16:50:15.632910 4769 scope.go:117] "RemoveContainer" containerID="27d9f71990a770d8924f8e4d469dd64f5b068309535b1ae6ecb4774de3aba0a6" Jan 31 16:50:15 crc kubenswrapper[4769]: E0131 16:50:15.633206 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:50:20 crc kubenswrapper[4769]: I0131 16:50:20.708105 4769 scope.go:117] "RemoveContainer" containerID="4f3e736a2db9b563f3db8d17a4bab1c2f337d7ba0646fa64024a653ce3113175" Jan 31 16:50:20 crc kubenswrapper[4769]: I0131 16:50:20.708747 4769 scope.go:117] "RemoveContainer" 
containerID="5803c1dff1ed7f6bf06d14f47ec0fe2b2c11ff79e8b673019de6c2c00e1b9a02" Jan 31 16:50:20 crc kubenswrapper[4769]: E0131 16:50:20.709103 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:50:28 crc kubenswrapper[4769]: I0131 16:50:28.709975 4769 scope.go:117] "RemoveContainer" containerID="6a79b7690071072023a0b7227ba746e7c742721d924455e4e2edb176c938ed75" Jan 31 16:50:28 crc kubenswrapper[4769]: I0131 16:50:28.710785 4769 scope.go:117] "RemoveContainer" containerID="3d82bfccbbfd48f342623eaefff8053c663d95bc8da0e0dda99c124952665791" Jan 31 16:50:28 crc kubenswrapper[4769]: I0131 16:50:28.710968 4769 scope.go:117] "RemoveContainer" containerID="27d9f71990a770d8924f8e4d469dd64f5b068309535b1ae6ecb4774de3aba0a6" Jan 31 16:50:28 crc kubenswrapper[4769]: E0131 16:50:28.711433 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:50:32 crc kubenswrapper[4769]: I0131 16:50:32.716071 4769 scope.go:117] "RemoveContainer" containerID="4f3e736a2db9b563f3db8d17a4bab1c2f337d7ba0646fa64024a653ce3113175" Jan 31 16:50:32 crc kubenswrapper[4769]: I0131 16:50:32.716597 4769 scope.go:117] "RemoveContainer" containerID="5803c1dff1ed7f6bf06d14f47ec0fe2b2c11ff79e8b673019de6c2c00e1b9a02" Jan 31 16:50:32 crc kubenswrapper[4769]: E0131 16:50:32.716945 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:50:34 crc kubenswrapper[4769]: I0131 16:50:34.835462 4769 generic.go:334] "Generic (PLEG): container finished" podID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerID="14e13a74ac424327d602e111f611c13f87518391992630077fd51df1b2cb4990" exitCode=1 Jan 31 16:50:34 crc kubenswrapper[4769]: I0131 16:50:34.835646 4769 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerDied","Data":"14e13a74ac424327d602e111f611c13f87518391992630077fd51df1b2cb4990"} Jan 31 16:50:34 crc kubenswrapper[4769]: I0131 16:50:34.835770 4769 scope.go:117] "RemoveContainer" containerID="09b7562c3d466a41880f5909b6af118ad218111a5b099f6f918545032b3764fa" Jan 31 16:50:34 crc kubenswrapper[4769]: I0131 16:50:34.836561 4769 scope.go:117] "RemoveContainer" containerID="6a79b7690071072023a0b7227ba746e7c742721d924455e4e2edb176c938ed75" Jan 31 16:50:34 crc kubenswrapper[4769]: I0131 16:50:34.836671 4769 scope.go:117] "RemoveContainer" containerID="3d82bfccbbfd48f342623eaefff8053c663d95bc8da0e0dda99c124952665791" Jan 31 16:50:34 crc kubenswrapper[4769]: I0131 16:50:34.836714 4769 scope.go:117] "RemoveContainer" containerID="14e13a74ac424327d602e111f611c13f87518391992630077fd51df1b2cb4990" Jan 31 16:50:34 crc kubenswrapper[4769]: I0131 16:50:34.836880 4769 scope.go:117] "RemoveContainer" containerID="27d9f71990a770d8924f8e4d469dd64f5b068309535b1ae6ecb4774de3aba0a6" Jan 31 16:50:34 crc kubenswrapper[4769]: E0131 16:50:34.837412 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 10s restarting failed container=container-updater pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:50:44 crc kubenswrapper[4769]: I0131 16:50:44.707862 4769 scope.go:117] "RemoveContainer" containerID="4f3e736a2db9b563f3db8d17a4bab1c2f337d7ba0646fa64024a653ce3113175" Jan 31 16:50:44 crc kubenswrapper[4769]: I0131 16:50:44.708469 4769 scope.go:117] "RemoveContainer" containerID="5803c1dff1ed7f6bf06d14f47ec0fe2b2c11ff79e8b673019de6c2c00e1b9a02" Jan 31 16:50:44 crc kubenswrapper[4769]: E0131 16:50:44.875216 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:50:44 crc kubenswrapper[4769]: I0131 16:50:44.930291 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerStarted","Data":"e03bfde2874d63f3a16dbfd108c258beb187cba5b2243572007a561bb34f5cc8"} Jan 31 16:50:44 crc kubenswrapper[4769]: I0131 16:50:44.930612 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:50:44 crc kubenswrapper[4769]: I0131 
16:50:44.931228 4769 scope.go:117] "RemoveContainer" containerID="5803c1dff1ed7f6bf06d14f47ec0fe2b2c11ff79e8b673019de6c2c00e1b9a02" Jan 31 16:50:44 crc kubenswrapper[4769]: E0131 16:50:44.931665 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:50:45 crc kubenswrapper[4769]: I0131 16:50:45.938613 4769 scope.go:117] "RemoveContainer" containerID="5803c1dff1ed7f6bf06d14f47ec0fe2b2c11ff79e8b673019de6c2c00e1b9a02" Jan 31 16:50:45 crc kubenswrapper[4769]: E0131 16:50:45.938922 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:50:47 crc kubenswrapper[4769]: I0131 16:50:47.708809 4769 scope.go:117] "RemoveContainer" containerID="6a79b7690071072023a0b7227ba746e7c742721d924455e4e2edb176c938ed75" Jan 31 16:50:47 crc kubenswrapper[4769]: I0131 16:50:47.709228 4769 scope.go:117] "RemoveContainer" containerID="3d82bfccbbfd48f342623eaefff8053c663d95bc8da0e0dda99c124952665791" Jan 31 16:50:47 crc kubenswrapper[4769]: I0131 16:50:47.709275 4769 scope.go:117] "RemoveContainer" containerID="14e13a74ac424327d602e111f611c13f87518391992630077fd51df1b2cb4990" Jan 31 16:50:47 crc kubenswrapper[4769]: I0131 16:50:47.709398 4769 scope.go:117] "RemoveContainer" containerID="27d9f71990a770d8924f8e4d469dd64f5b068309535b1ae6ecb4774de3aba0a6" Jan 31 16:50:47 crc kubenswrapper[4769]: E0131 16:50:47.903677 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:50:47 crc kubenswrapper[4769]: I0131 16:50:47.964778 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerStarted","Data":"d7679f19ae08a5bddb7806b0114bfd77810ac5077e5f109cc699c3738d51d805"} Jan 31 16:50:47 crc kubenswrapper[4769]: I0131 16:50:47.965549 4769 scope.go:117] "RemoveContainer" containerID="6a79b7690071072023a0b7227ba746e7c742721d924455e4e2edb176c938ed75" Jan 31 16:50:47 crc kubenswrapper[4769]: I0131 16:50:47.965623 4769 scope.go:117] "RemoveContainer" containerID="3d82bfccbbfd48f342623eaefff8053c663d95bc8da0e0dda99c124952665791" Jan 31 16:50:47 crc kubenswrapper[4769]: I0131 16:50:47.965758 4769 scope.go:117] 
"RemoveContainer" containerID="27d9f71990a770d8924f8e4d469dd64f5b068309535b1ae6ecb4774de3aba0a6" Jan 31 16:50:47 crc kubenswrapper[4769]: E0131 16:50:47.966157 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:50:50 crc kubenswrapper[4769]: I0131 16:50:50.651767 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:50:51 crc kubenswrapper[4769]: I0131 16:50:51.647585 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:50:53 crc kubenswrapper[4769]: I0131 16:50:53.647181 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:50:56 crc kubenswrapper[4769]: I0131 16:50:56.647741 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:50:56 crc kubenswrapper[4769]: I0131 16:50:56.648019 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:50:56 crc kubenswrapper[4769]: I0131 16:50:56.648383 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:50:56 crc kubenswrapper[4769]: I0131 16:50:56.649162 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="proxy-httpd" containerStatusID={"Type":"cri-o","ID":"e03bfde2874d63f3a16dbfd108c258beb187cba5b2243572007a561bb34f5cc8"} pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" containerMessage="Container proxy-httpd failed liveness probe, will be restarted" Jan 31 16:50:56 crc kubenswrapper[4769]: I0131 16:50:56.649189 4769 scope.go:117] "RemoveContainer" containerID="5803c1dff1ed7f6bf06d14f47ec0fe2b2c11ff79e8b673019de6c2c00e1b9a02" Jan 31 16:50:56 crc kubenswrapper[4769]: I0131 16:50:56.649220 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" 
podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" containerID="cri-o://e03bfde2874d63f3a16dbfd108c258beb187cba5b2243572007a561bb34f5cc8" gracePeriod=30 Jan 31 16:50:56 crc kubenswrapper[4769]: I0131 16:50:56.651082 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:50:56 crc kubenswrapper[4769]: E0131 16:50:56.786775 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:50:57 crc kubenswrapper[4769]: I0131 16:50:57.056176 4769 generic.go:334] "Generic (PLEG): container finished" podID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerID="e03bfde2874d63f3a16dbfd108c258beb187cba5b2243572007a561bb34f5cc8" exitCode=0 Jan 31 16:50:57 crc kubenswrapper[4769]: I0131 16:50:57.056228 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerDied","Data":"e03bfde2874d63f3a16dbfd108c258beb187cba5b2243572007a561bb34f5cc8"} Jan 31 16:50:57 crc kubenswrapper[4769]: I0131 16:50:57.056265 4769 scope.go:117] "RemoveContainer" containerID="4f3e736a2db9b563f3db8d17a4bab1c2f337d7ba0646fa64024a653ce3113175" Jan 31 16:50:57 crc kubenswrapper[4769]: I0131 16:50:57.057181 4769 scope.go:117] "RemoveContainer" containerID="e03bfde2874d63f3a16dbfd108c258beb187cba5b2243572007a561bb34f5cc8" Jan 31 16:50:57 crc kubenswrapper[4769]: I0131 16:50:57.057246 4769 scope.go:117] "RemoveContainer" containerID="5803c1dff1ed7f6bf06d14f47ec0fe2b2c11ff79e8b673019de6c2c00e1b9a02" Jan 31 16:50:57 crc kubenswrapper[4769]: E0131 16:50:57.057706 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:51:00 crc kubenswrapper[4769]: I0131 16:51:00.708856 4769 scope.go:117] "RemoveContainer" containerID="6a79b7690071072023a0b7227ba746e7c742721d924455e4e2edb176c938ed75" Jan 31 16:51:00 crc kubenswrapper[4769]: I0131 16:51:00.709252 4769 scope.go:117] "RemoveContainer" containerID="3d82bfccbbfd48f342623eaefff8053c663d95bc8da0e0dda99c124952665791" Jan 31 16:51:00 crc kubenswrapper[4769]: I0131 16:51:00.709430 4769 scope.go:117] "RemoveContainer" containerID="27d9f71990a770d8924f8e4d469dd64f5b068309535b1ae6ecb4774de3aba0a6" Jan 31 16:51:00 crc kubenswrapper[4769]: E0131 16:51:00.709907 4769 
pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:51:02 crc kubenswrapper[4769]: E0131 16:51:02.889514 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ring-data-devices], unattached volumes=[], failed to process volumes=[]: context deadline exceeded" pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" podUID="54c0116b-a027-4f11-8b6b-aa00778f1acb" Jan 31 16:51:03 crc kubenswrapper[4769]: I0131 16:51:03.110652 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 16:51:06 crc kubenswrapper[4769]: I0131 16:51:06.055009 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices\") pod \"swift-ring-rebalance-2sjs2\" (UID: \"54c0116b-a027-4f11-8b6b-aa00778f1acb\") " pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 16:51:06 crc kubenswrapper[4769]: E0131 16:51:06.055211 4769 configmap.go:193] Couldn't get configMap swift-kuttl-tests/swift-ring-config-data: configmap "swift-ring-config-data" not found Jan 31 16:51:06 crc kubenswrapper[4769]: E0131 16:51:06.055307 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices podName:54c0116b-a027-4f11-8b6b-aa00778f1acb nodeName:}" failed. No retries permitted until 2026-01-31 16:53:08.055285283 +0000 UTC m=+1436.129453952 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices") pod "swift-ring-rebalance-2sjs2" (UID: "54c0116b-a027-4f11-8b6b-aa00778f1acb") : configmap "swift-ring-config-data" not found Jan 31 16:51:07 crc kubenswrapper[4769]: I0131 16:51:07.708770 4769 scope.go:117] "RemoveContainer" containerID="e03bfde2874d63f3a16dbfd108c258beb187cba5b2243572007a561bb34f5cc8" Jan 31 16:51:07 crc kubenswrapper[4769]: I0131 16:51:07.709111 4769 scope.go:117] "RemoveContainer" containerID="5803c1dff1ed7f6bf06d14f47ec0fe2b2c11ff79e8b673019de6c2c00e1b9a02" Jan 31 16:51:07 crc kubenswrapper[4769]: E0131 16:51:07.709485 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:51:13 crc kubenswrapper[4769]: I0131 16:51:13.708465 4769 scope.go:117] "RemoveContainer" containerID="6a79b7690071072023a0b7227ba746e7c742721d924455e4e2edb176c938ed75" Jan 31 16:51:13 crc kubenswrapper[4769]: I0131 16:51:13.708872 4769 scope.go:117] "RemoveContainer" containerID="3d82bfccbbfd48f342623eaefff8053c663d95bc8da0e0dda99c124952665791" Jan 31 16:51:13 crc kubenswrapper[4769]: I0131 16:51:13.708988 4769 scope.go:117] "RemoveContainer" containerID="27d9f71990a770d8924f8e4d469dd64f5b068309535b1ae6ecb4774de3aba0a6" Jan 31 16:51:13 crc kubenswrapper[4769]: E0131 16:51:13.709313 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:51:20 crc kubenswrapper[4769]: I0131 16:51:20.682281 4769 patch_prober.go:28] interesting pod/machine-config-daemon-4bqbm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 31 16:51:20 crc kubenswrapper[4769]: I0131 16:51:20.682356 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 31 16:51:20 crc kubenswrapper[4769]: I0131 16:51:20.708179 4769 scope.go:117] 
"RemoveContainer" containerID="e03bfde2874d63f3a16dbfd108c258beb187cba5b2243572007a561bb34f5cc8" Jan 31 16:51:20 crc kubenswrapper[4769]: I0131 16:51:20.708234 4769 scope.go:117] "RemoveContainer" containerID="5803c1dff1ed7f6bf06d14f47ec0fe2b2c11ff79e8b673019de6c2c00e1b9a02" Jan 31 16:51:20 crc kubenswrapper[4769]: E0131 16:51:20.708747 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:51:25 crc kubenswrapper[4769]: I0131 16:51:25.709948 4769 scope.go:117] "RemoveContainer" containerID="6a79b7690071072023a0b7227ba746e7c742721d924455e4e2edb176c938ed75" Jan 31 16:51:25 crc kubenswrapper[4769]: I0131 16:51:25.710725 4769 scope.go:117] "RemoveContainer" containerID="3d82bfccbbfd48f342623eaefff8053c663d95bc8da0e0dda99c124952665791" Jan 31 16:51:25 crc kubenswrapper[4769]: I0131 16:51:25.710951 4769 scope.go:117] "RemoveContainer" containerID="27d9f71990a770d8924f8e4d469dd64f5b068309535b1ae6ecb4774de3aba0a6" Jan 31 16:51:25 crc kubenswrapper[4769]: E0131 16:51:25.711577 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:51:32 crc kubenswrapper[4769]: I0131 16:51:32.717375 4769 scope.go:117] "RemoveContainer" containerID="e03bfde2874d63f3a16dbfd108c258beb187cba5b2243572007a561bb34f5cc8" Jan 31 16:51:32 crc kubenswrapper[4769]: I0131 16:51:32.717607 4769 scope.go:117] "RemoveContainer" containerID="5803c1dff1ed7f6bf06d14f47ec0fe2b2c11ff79e8b673019de6c2c00e1b9a02" Jan 31 16:51:32 crc kubenswrapper[4769]: E0131 16:51:32.717768 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:51:37 crc kubenswrapper[4769]: I0131 16:51:37.709571 4769 scope.go:117] "RemoveContainer" 
containerID="6a79b7690071072023a0b7227ba746e7c742721d924455e4e2edb176c938ed75" Jan 31 16:51:37 crc kubenswrapper[4769]: I0131 16:51:37.710320 4769 scope.go:117] "RemoveContainer" containerID="3d82bfccbbfd48f342623eaefff8053c663d95bc8da0e0dda99c124952665791" Jan 31 16:51:37 crc kubenswrapper[4769]: I0131 16:51:37.710527 4769 scope.go:117] "RemoveContainer" containerID="27d9f71990a770d8924f8e4d469dd64f5b068309535b1ae6ecb4774de3aba0a6" Jan 31 16:51:37 crc kubenswrapper[4769]: E0131 16:51:37.710966 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:51:44 crc kubenswrapper[4769]: I0131 16:51:44.709107 4769 scope.go:117] "RemoveContainer" containerID="e03bfde2874d63f3a16dbfd108c258beb187cba5b2243572007a561bb34f5cc8" Jan 31 16:51:44 crc kubenswrapper[4769]: I0131 16:51:44.709619 4769 scope.go:117] "RemoveContainer" containerID="5803c1dff1ed7f6bf06d14f47ec0fe2b2c11ff79e8b673019de6c2c00e1b9a02" Jan 31 16:51:44 crc kubenswrapper[4769]: E0131 16:51:44.709807 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:51:50 crc kubenswrapper[4769]: I0131 16:51:50.682481 4769 patch_prober.go:28] interesting pod/machine-config-daemon-4bqbm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 31 16:51:50 crc kubenswrapper[4769]: I0131 16:51:50.684726 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 31 16:51:50 crc kubenswrapper[4769]: I0131 16:51:50.709598 4769 scope.go:117] "RemoveContainer" containerID="6a79b7690071072023a0b7227ba746e7c742721d924455e4e2edb176c938ed75" Jan 31 16:51:50 crc kubenswrapper[4769]: I0131 16:51:50.709765 4769 scope.go:117] "RemoveContainer" containerID="3d82bfccbbfd48f342623eaefff8053c663d95bc8da0e0dda99c124952665791" Jan 31 16:51:50 crc kubenswrapper[4769]: I0131 16:51:50.709950 4769 scope.go:117] "RemoveContainer" 
containerID="27d9f71990a770d8924f8e4d469dd64f5b068309535b1ae6ecb4774de3aba0a6" Jan 31 16:51:50 crc kubenswrapper[4769]: E0131 16:51:50.710449 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:51:58 crc kubenswrapper[4769]: I0131 16:51:58.708821 4769 scope.go:117] "RemoveContainer" containerID="e03bfde2874d63f3a16dbfd108c258beb187cba5b2243572007a561bb34f5cc8" Jan 31 16:51:58 crc kubenswrapper[4769]: I0131 16:51:58.709414 4769 scope.go:117] "RemoveContainer" containerID="5803c1dff1ed7f6bf06d14f47ec0fe2b2c11ff79e8b673019de6c2c00e1b9a02" Jan 31 16:51:58 crc kubenswrapper[4769]: E0131 16:51:58.709916 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:52:04 crc kubenswrapper[4769]: I0131 16:52:04.710081 4769 scope.go:117] "RemoveContainer" containerID="6a79b7690071072023a0b7227ba746e7c742721d924455e4e2edb176c938ed75" Jan 31 16:52:04 crc kubenswrapper[4769]: I0131 16:52:04.710794 4769 scope.go:117] "RemoveContainer" containerID="3d82bfccbbfd48f342623eaefff8053c663d95bc8da0e0dda99c124952665791" Jan 31 16:52:04 crc kubenswrapper[4769]: I0131 16:52:04.710973 4769 scope.go:117] "RemoveContainer" containerID="27d9f71990a770d8924f8e4d469dd64f5b068309535b1ae6ecb4774de3aba0a6" Jan 31 16:52:04 crc kubenswrapper[4769]: E0131 16:52:04.711456 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:52:13 crc kubenswrapper[4769]: I0131 16:52:13.708092 4769 scope.go:117] "RemoveContainer" 
containerID="e03bfde2874d63f3a16dbfd108c258beb187cba5b2243572007a561bb34f5cc8" Jan 31 16:52:13 crc kubenswrapper[4769]: I0131 16:52:13.708767 4769 scope.go:117] "RemoveContainer" containerID="5803c1dff1ed7f6bf06d14f47ec0fe2b2c11ff79e8b673019de6c2c00e1b9a02" Jan 31 16:52:13 crc kubenswrapper[4769]: E0131 16:52:13.709158 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:52:15 crc kubenswrapper[4769]: I0131 16:52:15.709227 4769 scope.go:117] "RemoveContainer" containerID="6a79b7690071072023a0b7227ba746e7c742721d924455e4e2edb176c938ed75" Jan 31 16:52:15 crc kubenswrapper[4769]: I0131 16:52:15.710137 4769 scope.go:117] "RemoveContainer" containerID="3d82bfccbbfd48f342623eaefff8053c663d95bc8da0e0dda99c124952665791" Jan 31 16:52:15 crc kubenswrapper[4769]: I0131 16:52:15.710286 4769 scope.go:117] "RemoveContainer" containerID="27d9f71990a770d8924f8e4d469dd64f5b068309535b1ae6ecb4774de3aba0a6" Jan 31 16:52:15 crc kubenswrapper[4769]: E0131 16:52:15.710666 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:52:20 crc kubenswrapper[4769]: I0131 16:52:20.682272 4769 patch_prober.go:28] interesting pod/machine-config-daemon-4bqbm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 31 16:52:20 crc kubenswrapper[4769]: I0131 16:52:20.682781 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 31 16:52:20 crc kubenswrapper[4769]: I0131 16:52:20.682853 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" Jan 31 16:52:20 crc kubenswrapper[4769]: I0131 16:52:20.683876 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" 
containerStatusID={"Type":"cri-o","ID":"3148c4c5005ab898a2a4ce40c3987f2918283183525c20480b00949fa02629ba"} pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 31 16:52:20 crc kubenswrapper[4769]: I0131 16:52:20.684025 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" containerID="cri-o://3148c4c5005ab898a2a4ce40c3987f2918283183525c20480b00949fa02629ba" gracePeriod=600 Jan 31 16:52:21 crc kubenswrapper[4769]: I0131 16:52:21.775566 4769 generic.go:334] "Generic (PLEG): container finished" podID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerID="3148c4c5005ab898a2a4ce40c3987f2918283183525c20480b00949fa02629ba" exitCode=0 Jan 31 16:52:21 crc kubenswrapper[4769]: I0131 16:52:21.775667 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" event={"ID":"1d352f75-43f7-4b8c-867e-cfb17bbbe011","Type":"ContainerDied","Data":"3148c4c5005ab898a2a4ce40c3987f2918283183525c20480b00949fa02629ba"} Jan 31 16:52:21 crc kubenswrapper[4769]: I0131 16:52:21.776383 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" event={"ID":"1d352f75-43f7-4b8c-867e-cfb17bbbe011","Type":"ContainerStarted","Data":"5953b98da67a838c445c5de169d2507557e859d1573f7aa06c047f960972389b"} Jan 31 16:52:21 crc kubenswrapper[4769]: I0131 16:52:21.776418 4769 scope.go:117] "RemoveContainer" containerID="2affff92918addd5ee0e3565d5ea4c6af01f170b7bf40a5a6a676c61598fac76" Jan 31 16:52:27 crc kubenswrapper[4769]: I0131 16:52:27.708082 4769 scope.go:117] "RemoveContainer" containerID="e03bfde2874d63f3a16dbfd108c258beb187cba5b2243572007a561bb34f5cc8" Jan 31 16:52:27 crc kubenswrapper[4769]: I0131 16:52:27.708644 4769 scope.go:117] "RemoveContainer" containerID="5803c1dff1ed7f6bf06d14f47ec0fe2b2c11ff79e8b673019de6c2c00e1b9a02" Jan 31 16:52:27 crc kubenswrapper[4769]: E0131 16:52:27.709014 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:52:29 crc kubenswrapper[4769]: I0131 16:52:29.708821 4769 scope.go:117] "RemoveContainer" containerID="6a79b7690071072023a0b7227ba746e7c742721d924455e4e2edb176c938ed75" Jan 31 16:52:29 crc kubenswrapper[4769]: I0131 16:52:29.709157 4769 scope.go:117] "RemoveContainer" containerID="3d82bfccbbfd48f342623eaefff8053c663d95bc8da0e0dda99c124952665791" Jan 31 16:52:29 crc kubenswrapper[4769]: I0131 16:52:29.709282 4769 scope.go:117] "RemoveContainer" containerID="27d9f71990a770d8924f8e4d469dd64f5b068309535b1ae6ecb4774de3aba0a6" Jan 31 16:52:29 crc kubenswrapper[4769]: E0131 16:52:29.709632 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s 
restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:52:39 crc kubenswrapper[4769]: I0131 16:52:39.709269 4769 scope.go:117] "RemoveContainer" containerID="e03bfde2874d63f3a16dbfd108c258beb187cba5b2243572007a561bb34f5cc8" Jan 31 16:52:39 crc kubenswrapper[4769]: I0131 16:52:39.710060 4769 scope.go:117] "RemoveContainer" containerID="5803c1dff1ed7f6bf06d14f47ec0fe2b2c11ff79e8b673019de6c2c00e1b9a02" Jan 31 16:52:39 crc kubenswrapper[4769]: E0131 16:52:39.874001 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:52:39 crc kubenswrapper[4769]: I0131 16:52:39.935464 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerStarted","Data":"dd3b22c098cb5ff9324f75488cb8c100817cdb9683a4061e67522bb86b98e242"} Jan 31 16:52:39 crc kubenswrapper[4769]: I0131 16:52:39.936098 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:52:39 crc kubenswrapper[4769]: I0131 16:52:39.936763 4769 scope.go:117] "RemoveContainer" containerID="e03bfde2874d63f3a16dbfd108c258beb187cba5b2243572007a561bb34f5cc8" Jan 31 16:52:39 crc kubenswrapper[4769]: E0131 16:52:39.937149 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:52:40 crc kubenswrapper[4769]: I0131 16:52:40.947154 4769 generic.go:334] "Generic (PLEG): container finished" podID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerID="dd3b22c098cb5ff9324f75488cb8c100817cdb9683a4061e67522bb86b98e242" exitCode=1 Jan 31 16:52:40 crc kubenswrapper[4769]: I0131 16:52:40.947216 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerDied","Data":"dd3b22c098cb5ff9324f75488cb8c100817cdb9683a4061e67522bb86b98e242"} Jan 31 16:52:40 crc kubenswrapper[4769]: I0131 16:52:40.947429 4769 scope.go:117] "RemoveContainer" containerID="5803c1dff1ed7f6bf06d14f47ec0fe2b2c11ff79e8b673019de6c2c00e1b9a02" Jan 31 16:52:40 crc kubenswrapper[4769]: I0131 16:52:40.947956 4769 scope.go:117] "RemoveContainer" containerID="e03bfde2874d63f3a16dbfd108c258beb187cba5b2243572007a561bb34f5cc8" Jan 31 16:52:40 crc 
kubenswrapper[4769]: I0131 16:52:40.947986 4769 scope.go:117] "RemoveContainer" containerID="dd3b22c098cb5ff9324f75488cb8c100817cdb9683a4061e67522bb86b98e242" Jan 31 16:52:40 crc kubenswrapper[4769]: E0131 16:52:40.948325 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:52:41 crc kubenswrapper[4769]: I0131 16:52:41.645234 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:52:41 crc kubenswrapper[4769]: I0131 16:52:41.968237 4769 scope.go:117] "RemoveContainer" containerID="e03bfde2874d63f3a16dbfd108c258beb187cba5b2243572007a561bb34f5cc8" Jan 31 16:52:41 crc kubenswrapper[4769]: I0131 16:52:41.968263 4769 scope.go:117] "RemoveContainer" containerID="dd3b22c098cb5ff9324f75488cb8c100817cdb9683a4061e67522bb86b98e242" Jan 31 16:52:41 crc kubenswrapper[4769]: E0131 16:52:41.968560 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:52:42 crc kubenswrapper[4769]: I0131 16:52:42.974577 4769 scope.go:117] "RemoveContainer" containerID="e03bfde2874d63f3a16dbfd108c258beb187cba5b2243572007a561bb34f5cc8" Jan 31 16:52:42 crc kubenswrapper[4769]: I0131 16:52:42.974937 4769 scope.go:117] "RemoveContainer" containerID="dd3b22c098cb5ff9324f75488cb8c100817cdb9683a4061e67522bb86b98e242" Jan 31 16:52:42 crc kubenswrapper[4769]: E0131 16:52:42.975296 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:52:44 crc kubenswrapper[4769]: I0131 16:52:44.708210 4769 scope.go:117] "RemoveContainer" containerID="6a79b7690071072023a0b7227ba746e7c742721d924455e4e2edb176c938ed75" Jan 31 16:52:44 crc kubenswrapper[4769]: I0131 16:52:44.708510 4769 scope.go:117] "RemoveContainer" containerID="3d82bfccbbfd48f342623eaefff8053c663d95bc8da0e0dda99c124952665791" Jan 31 16:52:44 crc kubenswrapper[4769]: I0131 16:52:44.708611 4769 scope.go:117] "RemoveContainer" 
containerID="27d9f71990a770d8924f8e4d469dd64f5b068309535b1ae6ecb4774de3aba0a6" Jan 31 16:52:44 crc kubenswrapper[4769]: E0131 16:52:44.708903 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:52:55 crc kubenswrapper[4769]: I0131 16:52:55.707949 4769 scope.go:117] "RemoveContainer" containerID="e03bfde2874d63f3a16dbfd108c258beb187cba5b2243572007a561bb34f5cc8" Jan 31 16:52:55 crc kubenswrapper[4769]: I0131 16:52:55.708271 4769 scope.go:117] "RemoveContainer" containerID="dd3b22c098cb5ff9324f75488cb8c100817cdb9683a4061e67522bb86b98e242" Jan 31 16:52:55 crc kubenswrapper[4769]: E0131 16:52:55.708589 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:52:59 crc kubenswrapper[4769]: I0131 16:52:59.709179 4769 scope.go:117] "RemoveContainer" containerID="6a79b7690071072023a0b7227ba746e7c742721d924455e4e2edb176c938ed75" Jan 31 16:52:59 crc kubenswrapper[4769]: I0131 16:52:59.709789 4769 scope.go:117] "RemoveContainer" containerID="3d82bfccbbfd48f342623eaefff8053c663d95bc8da0e0dda99c124952665791" Jan 31 16:52:59 crc kubenswrapper[4769]: I0131 16:52:59.709988 4769 scope.go:117] "RemoveContainer" containerID="27d9f71990a770d8924f8e4d469dd64f5b068309535b1ae6ecb4774de3aba0a6" Jan 31 16:53:00 crc kubenswrapper[4769]: I0131 16:53:00.143821 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerStarted","Data":"606b945b21b44863bc237f55365cc89ef6ace802b31395f55b3dd5048fc6e1bd"} Jan 31 16:53:01 crc kubenswrapper[4769]: I0131 16:53:01.167151 4769 generic.go:334] "Generic (PLEG): container finished" podID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerID="606b945b21b44863bc237f55365cc89ef6ace802b31395f55b3dd5048fc6e1bd" exitCode=1 Jan 31 16:53:01 crc kubenswrapper[4769]: I0131 16:53:01.167207 4769 generic.go:334] "Generic (PLEG): container finished" podID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerID="85171d22ea7aba0d2cc2f3e3c5a1d5588a4dcc13108f639e26ddd04660a8b834" exitCode=1 Jan 31 16:53:01 crc kubenswrapper[4769]: I0131 16:53:01.167229 4769 generic.go:334] "Generic (PLEG): container finished" podID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" 
containerID="b0d0b0226d2d68e52b80b8a3819a2b898a59e1d0a34d8edf113602bbdabaaf70" exitCode=1 Jan 31 16:53:01 crc kubenswrapper[4769]: I0131 16:53:01.167262 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerDied","Data":"606b945b21b44863bc237f55365cc89ef6ace802b31395f55b3dd5048fc6e1bd"} Jan 31 16:53:01 crc kubenswrapper[4769]: I0131 16:53:01.167313 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerDied","Data":"85171d22ea7aba0d2cc2f3e3c5a1d5588a4dcc13108f639e26ddd04660a8b834"} Jan 31 16:53:01 crc kubenswrapper[4769]: I0131 16:53:01.167339 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerDied","Data":"b0d0b0226d2d68e52b80b8a3819a2b898a59e1d0a34d8edf113602bbdabaaf70"} Jan 31 16:53:01 crc kubenswrapper[4769]: I0131 16:53:01.167373 4769 scope.go:117] "RemoveContainer" containerID="6a79b7690071072023a0b7227ba746e7c742721d924455e4e2edb176c938ed75" Jan 31 16:53:01 crc kubenswrapper[4769]: I0131 16:53:01.168201 4769 scope.go:117] "RemoveContainer" containerID="606b945b21b44863bc237f55365cc89ef6ace802b31395f55b3dd5048fc6e1bd" Jan 31 16:53:01 crc kubenswrapper[4769]: I0131 16:53:01.168333 4769 scope.go:117] "RemoveContainer" containerID="b0d0b0226d2d68e52b80b8a3819a2b898a59e1d0a34d8edf113602bbdabaaf70" Jan 31 16:53:01 crc kubenswrapper[4769]: I0131 16:53:01.168565 4769 scope.go:117] "RemoveContainer" containerID="85171d22ea7aba0d2cc2f3e3c5a1d5588a4dcc13108f639e26ddd04660a8b834" Jan 31 16:53:01 crc kubenswrapper[4769]: E0131 16:53:01.169171 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:53:01 crc kubenswrapper[4769]: I0131 16:53:01.234725 4769 scope.go:117] "RemoveContainer" containerID="27d9f71990a770d8924f8e4d469dd64f5b068309535b1ae6ecb4774de3aba0a6" Jan 31 16:53:01 crc kubenswrapper[4769]: I0131 16:53:01.288240 4769 scope.go:117] "RemoveContainer" containerID="3d82bfccbbfd48f342623eaefff8053c663d95bc8da0e0dda99c124952665791" Jan 31 16:53:02 crc kubenswrapper[4769]: I0131 16:53:02.188586 4769 scope.go:117] "RemoveContainer" containerID="606b945b21b44863bc237f55365cc89ef6ace802b31395f55b3dd5048fc6e1bd" Jan 31 16:53:02 crc kubenswrapper[4769]: I0131 16:53:02.189091 4769 scope.go:117] "RemoveContainer" containerID="b0d0b0226d2d68e52b80b8a3819a2b898a59e1d0a34d8edf113602bbdabaaf70" Jan 31 16:53:02 crc kubenswrapper[4769]: I0131 16:53:02.189286 4769 scope.go:117] "RemoveContainer" containerID="85171d22ea7aba0d2cc2f3e3c5a1d5588a4dcc13108f639e26ddd04660a8b834" Jan 31 16:53:02 crc kubenswrapper[4769]: E0131 
16:53:02.189786 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:53:06 crc kubenswrapper[4769]: E0131 16:53:06.112077 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ring-data-devices], unattached volumes=[], failed to process volumes=[]: context deadline exceeded" pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" podUID="54c0116b-a027-4f11-8b6b-aa00778f1acb" Jan 31 16:53:06 crc kubenswrapper[4769]: I0131 16:53:06.222965 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 16:53:08 crc kubenswrapper[4769]: I0131 16:53:08.061416 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices\") pod \"swift-ring-rebalance-2sjs2\" (UID: \"54c0116b-a027-4f11-8b6b-aa00778f1acb\") " pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 16:53:08 crc kubenswrapper[4769]: E0131 16:53:08.061646 4769 configmap.go:193] Couldn't get configMap swift-kuttl-tests/swift-ring-config-data: configmap "swift-ring-config-data" not found Jan 31 16:53:08 crc kubenswrapper[4769]: E0131 16:53:08.062063 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices podName:54c0116b-a027-4f11-8b6b-aa00778f1acb nodeName:}" failed. No retries permitted until 2026-01-31 16:55:10.062030288 +0000 UTC m=+1558.136198997 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices") pod "swift-ring-rebalance-2sjs2" (UID: "54c0116b-a027-4f11-8b6b-aa00778f1acb") : configmap "swift-ring-config-data" not found Jan 31 16:53:08 crc kubenswrapper[4769]: I0131 16:53:08.709033 4769 scope.go:117] "RemoveContainer" containerID="e03bfde2874d63f3a16dbfd108c258beb187cba5b2243572007a561bb34f5cc8" Jan 31 16:53:08 crc kubenswrapper[4769]: I0131 16:53:08.709386 4769 scope.go:117] "RemoveContainer" containerID="dd3b22c098cb5ff9324f75488cb8c100817cdb9683a4061e67522bb86b98e242" Jan 31 16:53:08 crc kubenswrapper[4769]: E0131 16:53:08.709962 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:53:16 crc kubenswrapper[4769]: I0131 16:53:16.708825 4769 scope.go:117] "RemoveContainer" containerID="606b945b21b44863bc237f55365cc89ef6ace802b31395f55b3dd5048fc6e1bd" Jan 31 16:53:16 crc kubenswrapper[4769]: I0131 16:53:16.709481 4769 scope.go:117] "RemoveContainer" containerID="b0d0b0226d2d68e52b80b8a3819a2b898a59e1d0a34d8edf113602bbdabaaf70" Jan 31 16:53:16 crc kubenswrapper[4769]: I0131 16:53:16.709679 4769 scope.go:117] "RemoveContainer" containerID="85171d22ea7aba0d2cc2f3e3c5a1d5588a4dcc13108f639e26ddd04660a8b834" Jan 31 16:53:16 crc kubenswrapper[4769]: E0131 16:53:16.710141 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:53:18 crc kubenswrapper[4769]: I0131 16:53:18.508267 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-x27m5"] Jan 31 16:53:18 crc kubenswrapper[4769]: I0131 16:53:18.511790 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-x27m5" Jan 31 16:53:18 crc kubenswrapper[4769]: I0131 16:53:18.519669 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-x27m5"] Jan 31 16:53:18 crc kubenswrapper[4769]: I0131 16:53:18.639959 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d-catalog-content\") pod \"certified-operators-x27m5\" (UID: \"ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d\") " pod="openshift-marketplace/certified-operators-x27m5" Jan 31 16:53:18 crc kubenswrapper[4769]: I0131 16:53:18.640054 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d-utilities\") pod \"certified-operators-x27m5\" (UID: \"ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d\") " pod="openshift-marketplace/certified-operators-x27m5" Jan 31 16:53:18 crc kubenswrapper[4769]: I0131 16:53:18.640223 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s67jw\" (UniqueName: \"kubernetes.io/projected/ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d-kube-api-access-s67jw\") pod \"certified-operators-x27m5\" (UID: \"ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d\") " pod="openshift-marketplace/certified-operators-x27m5" Jan 31 16:53:18 crc kubenswrapper[4769]: I0131 16:53:18.741546 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d-catalog-content\") pod \"certified-operators-x27m5\" (UID: \"ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d\") " pod="openshift-marketplace/certified-operators-x27m5" Jan 31 16:53:18 crc kubenswrapper[4769]: I0131 16:53:18.741642 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d-utilities\") pod \"certified-operators-x27m5\" (UID: \"ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d\") " pod="openshift-marketplace/certified-operators-x27m5" Jan 31 16:53:18 crc kubenswrapper[4769]: I0131 16:53:18.741732 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s67jw\" (UniqueName: \"kubernetes.io/projected/ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d-kube-api-access-s67jw\") pod \"certified-operators-x27m5\" (UID: \"ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d\") " pod="openshift-marketplace/certified-operators-x27m5" Jan 31 16:53:18 crc kubenswrapper[4769]: I0131 16:53:18.742062 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d-catalog-content\") pod \"certified-operators-x27m5\" (UID: \"ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d\") " pod="openshift-marketplace/certified-operators-x27m5" Jan 31 16:53:18 crc kubenswrapper[4769]: I0131 16:53:18.742063 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d-utilities\") pod \"certified-operators-x27m5\" (UID: \"ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d\") " pod="openshift-marketplace/certified-operators-x27m5" Jan 31 16:53:18 crc kubenswrapper[4769]: I0131 16:53:18.761445 4769 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-s67jw\" (UniqueName: \"kubernetes.io/projected/ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d-kube-api-access-s67jw\") pod \"certified-operators-x27m5\" (UID: \"ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d\") " pod="openshift-marketplace/certified-operators-x27m5" Jan 31 16:53:18 crc kubenswrapper[4769]: I0131 16:53:18.835929 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-x27m5" Jan 31 16:53:19 crc kubenswrapper[4769]: I0131 16:53:19.088364 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-x27m5"] Jan 31 16:53:19 crc kubenswrapper[4769]: I0131 16:53:19.344599 4769 generic.go:334] "Generic (PLEG): container finished" podID="ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d" containerID="6fe7d371f345d6e4049888919757a5419fd172d0e016087614ba21301d3e22fe" exitCode=0 Jan 31 16:53:19 crc kubenswrapper[4769]: I0131 16:53:19.344669 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x27m5" event={"ID":"ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d","Type":"ContainerDied","Data":"6fe7d371f345d6e4049888919757a5419fd172d0e016087614ba21301d3e22fe"} Jan 31 16:53:19 crc kubenswrapper[4769]: I0131 16:53:19.344906 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x27m5" event={"ID":"ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d","Type":"ContainerStarted","Data":"855b12010bdc0c73a0b659e5dad90ea967652e4e7496b0360714df3ba0e520fd"} Jan 31 16:53:19 crc kubenswrapper[4769]: I0131 16:53:19.346115 4769 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 31 16:53:20 crc kubenswrapper[4769]: I0131 16:53:20.355650 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x27m5" event={"ID":"ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d","Type":"ContainerStarted","Data":"7d0f2d10d459c2bb7e87e38a2ba77247a223f41f8931b9b64ff6b78056fe83a3"} Jan 31 16:53:21 crc kubenswrapper[4769]: I0131 16:53:21.366703 4769 generic.go:334] "Generic (PLEG): container finished" podID="ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d" containerID="7d0f2d10d459c2bb7e87e38a2ba77247a223f41f8931b9b64ff6b78056fe83a3" exitCode=0 Jan 31 16:53:21 crc kubenswrapper[4769]: I0131 16:53:21.366761 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x27m5" event={"ID":"ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d","Type":"ContainerDied","Data":"7d0f2d10d459c2bb7e87e38a2ba77247a223f41f8931b9b64ff6b78056fe83a3"} Jan 31 16:53:22 crc kubenswrapper[4769]: I0131 16:53:22.381590 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x27m5" event={"ID":"ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d","Type":"ContainerStarted","Data":"2a89c37569e2f625a85f2e54ca6ed285b049878f8d603a0ca0c304a4d1b27db5"} Jan 31 16:53:22 crc kubenswrapper[4769]: I0131 16:53:22.423454 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-x27m5" podStartSLOduration=1.9901940329999999 podStartE2EDuration="4.423425011s" podCreationTimestamp="2026-01-31 16:53:18 +0000 UTC" firstStartedPulling="2026-01-31 16:53:19.345855226 +0000 UTC m=+1447.420023895" lastFinishedPulling="2026-01-31 16:53:21.779086194 +0000 UTC m=+1449.853254873" observedRunningTime="2026-01-31 16:53:22.411327991 +0000 UTC m=+1450.485496720" watchObservedRunningTime="2026-01-31 
16:53:22.423425011 +0000 UTC m=+1450.497593720" Jan 31 16:53:23 crc kubenswrapper[4769]: I0131 16:53:23.708075 4769 scope.go:117] "RemoveContainer" containerID="e03bfde2874d63f3a16dbfd108c258beb187cba5b2243572007a561bb34f5cc8" Jan 31 16:53:23 crc kubenswrapper[4769]: I0131 16:53:23.708116 4769 scope.go:117] "RemoveContainer" containerID="dd3b22c098cb5ff9324f75488cb8c100817cdb9683a4061e67522bb86b98e242" Jan 31 16:53:23 crc kubenswrapper[4769]: E0131 16:53:23.708385 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:53:28 crc kubenswrapper[4769]: I0131 16:53:28.460598 4769 generic.go:334] "Generic (PLEG): container finished" podID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerID="f2b74b0817580bbf8dee46673071db4df57036b13648d57687a8891be838e442" exitCode=1 Jan 31 16:53:28 crc kubenswrapper[4769]: I0131 16:53:28.460673 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerDied","Data":"f2b74b0817580bbf8dee46673071db4df57036b13648d57687a8891be838e442"} Jan 31 16:53:28 crc kubenswrapper[4769]: I0131 16:53:28.461286 4769 scope.go:117] "RemoveContainer" containerID="30a79b373b594fc90eeefd92adc4112a9a9bf07fda7fbc1c0b65d00fc6b49abf" Jan 31 16:53:28 crc kubenswrapper[4769]: I0131 16:53:28.462598 4769 scope.go:117] "RemoveContainer" containerID="606b945b21b44863bc237f55365cc89ef6ace802b31395f55b3dd5048fc6e1bd" Jan 31 16:53:28 crc kubenswrapper[4769]: I0131 16:53:28.462726 4769 scope.go:117] "RemoveContainer" containerID="b0d0b0226d2d68e52b80b8a3819a2b898a59e1d0a34d8edf113602bbdabaaf70" Jan 31 16:53:28 crc kubenswrapper[4769]: I0131 16:53:28.462891 4769 scope.go:117] "RemoveContainer" containerID="f2b74b0817580bbf8dee46673071db4df57036b13648d57687a8891be838e442" Jan 31 16:53:28 crc kubenswrapper[4769]: I0131 16:53:28.462927 4769 scope.go:117] "RemoveContainer" containerID="85171d22ea7aba0d2cc2f3e3c5a1d5588a4dcc13108f639e26ddd04660a8b834" Jan 31 16:53:28 crc kubenswrapper[4769]: E0131 16:53:28.464278 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 10s restarting failed container=object-updater pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" 
pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:53:28 crc kubenswrapper[4769]: I0131 16:53:28.836817 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-x27m5" Jan 31 16:53:28 crc kubenswrapper[4769]: I0131 16:53:28.836878 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-x27m5" Jan 31 16:53:28 crc kubenswrapper[4769]: I0131 16:53:28.914034 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-x27m5" Jan 31 16:53:29 crc kubenswrapper[4769]: I0131 16:53:29.543104 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-x27m5" Jan 31 16:53:29 crc kubenswrapper[4769]: I0131 16:53:29.612986 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-x27m5"] Jan 31 16:53:31 crc kubenswrapper[4769]: I0131 16:53:31.505346 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-x27m5" podUID="ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d" containerName="registry-server" containerID="cri-o://2a89c37569e2f625a85f2e54ca6ed285b049878f8d603a0ca0c304a4d1b27db5" gracePeriod=2 Jan 31 16:53:31 crc kubenswrapper[4769]: I0131 16:53:31.577571 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-wwjzp"] Jan 31 16:53:31 crc kubenswrapper[4769]: I0131 16:53:31.581044 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wwjzp" Jan 31 16:53:31 crc kubenswrapper[4769]: I0131 16:53:31.588947 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wwjzp"] Jan 31 16:53:31 crc kubenswrapper[4769]: I0131 16:53:31.649408 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c2354ec-99b5-4d49-b323-8fbc638cd57d-catalog-content\") pod \"redhat-operators-wwjzp\" (UID: \"5c2354ec-99b5-4d49-b323-8fbc638cd57d\") " pod="openshift-marketplace/redhat-operators-wwjzp" Jan 31 16:53:31 crc kubenswrapper[4769]: I0131 16:53:31.653978 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-68wvh\" (UniqueName: \"kubernetes.io/projected/5c2354ec-99b5-4d49-b323-8fbc638cd57d-kube-api-access-68wvh\") pod \"redhat-operators-wwjzp\" (UID: \"5c2354ec-99b5-4d49-b323-8fbc638cd57d\") " pod="openshift-marketplace/redhat-operators-wwjzp" Jan 31 16:53:31 crc kubenswrapper[4769]: I0131 16:53:31.654241 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c2354ec-99b5-4d49-b323-8fbc638cd57d-utilities\") pod \"redhat-operators-wwjzp\" (UID: \"5c2354ec-99b5-4d49-b323-8fbc638cd57d\") " pod="openshift-marketplace/redhat-operators-wwjzp" Jan 31 16:53:31 crc kubenswrapper[4769]: I0131 16:53:31.756899 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-68wvh\" (UniqueName: \"kubernetes.io/projected/5c2354ec-99b5-4d49-b323-8fbc638cd57d-kube-api-access-68wvh\") pod \"redhat-operators-wwjzp\" (UID: \"5c2354ec-99b5-4d49-b323-8fbc638cd57d\") " pod="openshift-marketplace/redhat-operators-wwjzp" Jan 
31 16:53:31 crc kubenswrapper[4769]: I0131 16:53:31.757011 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c2354ec-99b5-4d49-b323-8fbc638cd57d-utilities\") pod \"redhat-operators-wwjzp\" (UID: \"5c2354ec-99b5-4d49-b323-8fbc638cd57d\") " pod="openshift-marketplace/redhat-operators-wwjzp" Jan 31 16:53:31 crc kubenswrapper[4769]: I0131 16:53:31.757042 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c2354ec-99b5-4d49-b323-8fbc638cd57d-catalog-content\") pod \"redhat-operators-wwjzp\" (UID: \"5c2354ec-99b5-4d49-b323-8fbc638cd57d\") " pod="openshift-marketplace/redhat-operators-wwjzp" Jan 31 16:53:31 crc kubenswrapper[4769]: I0131 16:53:31.757970 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c2354ec-99b5-4d49-b323-8fbc638cd57d-catalog-content\") pod \"redhat-operators-wwjzp\" (UID: \"5c2354ec-99b5-4d49-b323-8fbc638cd57d\") " pod="openshift-marketplace/redhat-operators-wwjzp" Jan 31 16:53:31 crc kubenswrapper[4769]: I0131 16:53:31.758026 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c2354ec-99b5-4d49-b323-8fbc638cd57d-utilities\") pod \"redhat-operators-wwjzp\" (UID: \"5c2354ec-99b5-4d49-b323-8fbc638cd57d\") " pod="openshift-marketplace/redhat-operators-wwjzp" Jan 31 16:53:31 crc kubenswrapper[4769]: I0131 16:53:31.779395 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-68wvh\" (UniqueName: \"kubernetes.io/projected/5c2354ec-99b5-4d49-b323-8fbc638cd57d-kube-api-access-68wvh\") pod \"redhat-operators-wwjzp\" (UID: \"5c2354ec-99b5-4d49-b323-8fbc638cd57d\") " pod="openshift-marketplace/redhat-operators-wwjzp" Jan 31 16:53:31 crc kubenswrapper[4769]: I0131 16:53:31.898734 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-x27m5" Jan 31 16:53:31 crc kubenswrapper[4769]: I0131 16:53:31.975380 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wwjzp" Jan 31 16:53:32 crc kubenswrapper[4769]: I0131 16:53:32.061813 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s67jw\" (UniqueName: \"kubernetes.io/projected/ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d-kube-api-access-s67jw\") pod \"ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d\" (UID: \"ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d\") " Jan 31 16:53:32 crc kubenswrapper[4769]: I0131 16:53:32.061957 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d-utilities\") pod \"ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d\" (UID: \"ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d\") " Jan 31 16:53:32 crc kubenswrapper[4769]: I0131 16:53:32.062013 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d-catalog-content\") pod \"ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d\" (UID: \"ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d\") " Jan 31 16:53:32 crc kubenswrapper[4769]: I0131 16:53:32.063257 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d-utilities" (OuterVolumeSpecName: "utilities") pod "ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d" (UID: "ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:53:32 crc kubenswrapper[4769]: I0131 16:53:32.068680 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d-kube-api-access-s67jw" (OuterVolumeSpecName: "kube-api-access-s67jw") pod "ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d" (UID: "ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d"). InnerVolumeSpecName "kube-api-access-s67jw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:53:32 crc kubenswrapper[4769]: I0131 16:53:32.105127 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d" (UID: "ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:53:32 crc kubenswrapper[4769]: I0131 16:53:32.163260 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d-utilities\") on node \"crc\" DevicePath \"\"" Jan 31 16:53:32 crc kubenswrapper[4769]: I0131 16:53:32.163293 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 31 16:53:32 crc kubenswrapper[4769]: I0131 16:53:32.163303 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s67jw\" (UniqueName: \"kubernetes.io/projected/ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d-kube-api-access-s67jw\") on node \"crc\" DevicePath \"\"" Jan 31 16:53:32 crc kubenswrapper[4769]: I0131 16:53:32.183216 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wwjzp"] Jan 31 16:53:32 crc kubenswrapper[4769]: W0131 16:53:32.186488 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5c2354ec_99b5_4d49_b323_8fbc638cd57d.slice/crio-587f86d0e4deeb199ed94a05e078164b2da3febd409ea3bf11bea18cb5d3960d WatchSource:0}: Error finding container 587f86d0e4deeb199ed94a05e078164b2da3febd409ea3bf11bea18cb5d3960d: Status 404 returned error can't find the container with id 587f86d0e4deeb199ed94a05e078164b2da3febd409ea3bf11bea18cb5d3960d Jan 31 16:53:32 crc kubenswrapper[4769]: I0131 16:53:32.513248 4769 generic.go:334] "Generic (PLEG): container finished" podID="5c2354ec-99b5-4d49-b323-8fbc638cd57d" containerID="579c01b0731e92ab798bd5294c3fb45ae42f14b71ed665df6e46f85857069214" exitCode=0 Jan 31 16:53:32 crc kubenswrapper[4769]: I0131 16:53:32.513330 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wwjzp" event={"ID":"5c2354ec-99b5-4d49-b323-8fbc638cd57d","Type":"ContainerDied","Data":"579c01b0731e92ab798bd5294c3fb45ae42f14b71ed665df6e46f85857069214"} Jan 31 16:53:32 crc kubenswrapper[4769]: I0131 16:53:32.513631 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wwjzp" event={"ID":"5c2354ec-99b5-4d49-b323-8fbc638cd57d","Type":"ContainerStarted","Data":"587f86d0e4deeb199ed94a05e078164b2da3febd409ea3bf11bea18cb5d3960d"} Jan 31 16:53:32 crc kubenswrapper[4769]: I0131 16:53:32.520270 4769 generic.go:334] "Generic (PLEG): container finished" podID="ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d" containerID="2a89c37569e2f625a85f2e54ca6ed285b049878f8d603a0ca0c304a4d1b27db5" exitCode=0 Jan 31 16:53:32 crc kubenswrapper[4769]: I0131 16:53:32.520320 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x27m5" event={"ID":"ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d","Type":"ContainerDied","Data":"2a89c37569e2f625a85f2e54ca6ed285b049878f8d603a0ca0c304a4d1b27db5"} Jan 31 16:53:32 crc kubenswrapper[4769]: I0131 16:53:32.520355 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-x27m5" Jan 31 16:53:32 crc kubenswrapper[4769]: I0131 16:53:32.520371 4769 scope.go:117] "RemoveContainer" containerID="2a89c37569e2f625a85f2e54ca6ed285b049878f8d603a0ca0c304a4d1b27db5" Jan 31 16:53:32 crc kubenswrapper[4769]: I0131 16:53:32.520359 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x27m5" event={"ID":"ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d","Type":"ContainerDied","Data":"855b12010bdc0c73a0b659e5dad90ea967652e4e7496b0360714df3ba0e520fd"} Jan 31 16:53:32 crc kubenswrapper[4769]: I0131 16:53:32.546513 4769 scope.go:117] "RemoveContainer" containerID="7d0f2d10d459c2bb7e87e38a2ba77247a223f41f8931b9b64ff6b78056fe83a3" Jan 31 16:53:32 crc kubenswrapper[4769]: I0131 16:53:32.560320 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-x27m5"] Jan 31 16:53:32 crc kubenswrapper[4769]: I0131 16:53:32.568361 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-x27m5"] Jan 31 16:53:32 crc kubenswrapper[4769]: I0131 16:53:32.580387 4769 scope.go:117] "RemoveContainer" containerID="6fe7d371f345d6e4049888919757a5419fd172d0e016087614ba21301d3e22fe" Jan 31 16:53:32 crc kubenswrapper[4769]: I0131 16:53:32.597670 4769 scope.go:117] "RemoveContainer" containerID="2a89c37569e2f625a85f2e54ca6ed285b049878f8d603a0ca0c304a4d1b27db5" Jan 31 16:53:32 crc kubenswrapper[4769]: E0131 16:53:32.598355 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2a89c37569e2f625a85f2e54ca6ed285b049878f8d603a0ca0c304a4d1b27db5\": container with ID starting with 2a89c37569e2f625a85f2e54ca6ed285b049878f8d603a0ca0c304a4d1b27db5 not found: ID does not exist" containerID="2a89c37569e2f625a85f2e54ca6ed285b049878f8d603a0ca0c304a4d1b27db5" Jan 31 16:53:32 crc kubenswrapper[4769]: I0131 16:53:32.598606 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2a89c37569e2f625a85f2e54ca6ed285b049878f8d603a0ca0c304a4d1b27db5"} err="failed to get container status \"2a89c37569e2f625a85f2e54ca6ed285b049878f8d603a0ca0c304a4d1b27db5\": rpc error: code = NotFound desc = could not find container \"2a89c37569e2f625a85f2e54ca6ed285b049878f8d603a0ca0c304a4d1b27db5\": container with ID starting with 2a89c37569e2f625a85f2e54ca6ed285b049878f8d603a0ca0c304a4d1b27db5 not found: ID does not exist" Jan 31 16:53:32 crc kubenswrapper[4769]: I0131 16:53:32.598835 4769 scope.go:117] "RemoveContainer" containerID="7d0f2d10d459c2bb7e87e38a2ba77247a223f41f8931b9b64ff6b78056fe83a3" Jan 31 16:53:32 crc kubenswrapper[4769]: E0131 16:53:32.599694 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7d0f2d10d459c2bb7e87e38a2ba77247a223f41f8931b9b64ff6b78056fe83a3\": container with ID starting with 7d0f2d10d459c2bb7e87e38a2ba77247a223f41f8931b9b64ff6b78056fe83a3 not found: ID does not exist" containerID="7d0f2d10d459c2bb7e87e38a2ba77247a223f41f8931b9b64ff6b78056fe83a3" Jan 31 16:53:32 crc kubenswrapper[4769]: I0131 16:53:32.599782 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d0f2d10d459c2bb7e87e38a2ba77247a223f41f8931b9b64ff6b78056fe83a3"} err="failed to get container status \"7d0f2d10d459c2bb7e87e38a2ba77247a223f41f8931b9b64ff6b78056fe83a3\": rpc error: code = NotFound desc = could not find 
container \"7d0f2d10d459c2bb7e87e38a2ba77247a223f41f8931b9b64ff6b78056fe83a3\": container with ID starting with 7d0f2d10d459c2bb7e87e38a2ba77247a223f41f8931b9b64ff6b78056fe83a3 not found: ID does not exist" Jan 31 16:53:32 crc kubenswrapper[4769]: I0131 16:53:32.599843 4769 scope.go:117] "RemoveContainer" containerID="6fe7d371f345d6e4049888919757a5419fd172d0e016087614ba21301d3e22fe" Jan 31 16:53:32 crc kubenswrapper[4769]: E0131 16:53:32.600433 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6fe7d371f345d6e4049888919757a5419fd172d0e016087614ba21301d3e22fe\": container with ID starting with 6fe7d371f345d6e4049888919757a5419fd172d0e016087614ba21301d3e22fe not found: ID does not exist" containerID="6fe7d371f345d6e4049888919757a5419fd172d0e016087614ba21301d3e22fe" Jan 31 16:53:32 crc kubenswrapper[4769]: I0131 16:53:32.600460 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6fe7d371f345d6e4049888919757a5419fd172d0e016087614ba21301d3e22fe"} err="failed to get container status \"6fe7d371f345d6e4049888919757a5419fd172d0e016087614ba21301d3e22fe\": rpc error: code = NotFound desc = could not find container \"6fe7d371f345d6e4049888919757a5419fd172d0e016087614ba21301d3e22fe\": container with ID starting with 6fe7d371f345d6e4049888919757a5419fd172d0e016087614ba21301d3e22fe not found: ID does not exist" Jan 31 16:53:32 crc kubenswrapper[4769]: I0131 16:53:32.714909 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d" path="/var/lib/kubelet/pods/ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d/volumes" Jan 31 16:53:33 crc kubenswrapper[4769]: I0131 16:53:33.530377 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wwjzp" event={"ID":"5c2354ec-99b5-4d49-b323-8fbc638cd57d","Type":"ContainerStarted","Data":"869d0742e57349a24782ad4922dceb965758a85def50ef48b5225474bace4baf"} Jan 31 16:53:34 crc kubenswrapper[4769]: I0131 16:53:34.545965 4769 generic.go:334] "Generic (PLEG): container finished" podID="5c2354ec-99b5-4d49-b323-8fbc638cd57d" containerID="869d0742e57349a24782ad4922dceb965758a85def50ef48b5225474bace4baf" exitCode=0 Jan 31 16:53:34 crc kubenswrapper[4769]: I0131 16:53:34.546012 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wwjzp" event={"ID":"5c2354ec-99b5-4d49-b323-8fbc638cd57d","Type":"ContainerDied","Data":"869d0742e57349a24782ad4922dceb965758a85def50ef48b5225474bace4baf"} Jan 31 16:53:34 crc kubenswrapper[4769]: I0131 16:53:34.708347 4769 scope.go:117] "RemoveContainer" containerID="e03bfde2874d63f3a16dbfd108c258beb187cba5b2243572007a561bb34f5cc8" Jan 31 16:53:34 crc kubenswrapper[4769]: I0131 16:53:34.708396 4769 scope.go:117] "RemoveContainer" containerID="dd3b22c098cb5ff9324f75488cb8c100817cdb9683a4061e67522bb86b98e242" Jan 31 16:53:34 crc kubenswrapper[4769]: E0131 16:53:34.708850 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" 
pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:53:35 crc kubenswrapper[4769]: I0131 16:53:35.557105 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wwjzp" event={"ID":"5c2354ec-99b5-4d49-b323-8fbc638cd57d","Type":"ContainerStarted","Data":"b501b0431b0bd53cd258683c44a84e68ef30c06515bd60e7a64b7d3f97e586e4"} Jan 31 16:53:35 crc kubenswrapper[4769]: I0131 16:53:35.589756 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-wwjzp" podStartSLOduration=1.940848907 podStartE2EDuration="4.589729998s" podCreationTimestamp="2026-01-31 16:53:31 +0000 UTC" firstStartedPulling="2026-01-31 16:53:32.515041519 +0000 UTC m=+1460.589210188" lastFinishedPulling="2026-01-31 16:53:35.16392257 +0000 UTC m=+1463.238091279" observedRunningTime="2026-01-31 16:53:35.575753427 +0000 UTC m=+1463.649922126" watchObservedRunningTime="2026-01-31 16:53:35.589729998 +0000 UTC m=+1463.663898697" Jan 31 16:53:40 crc kubenswrapper[4769]: I0131 16:53:40.709302 4769 scope.go:117] "RemoveContainer" containerID="606b945b21b44863bc237f55365cc89ef6ace802b31395f55b3dd5048fc6e1bd" Jan 31 16:53:40 crc kubenswrapper[4769]: I0131 16:53:40.709699 4769 scope.go:117] "RemoveContainer" containerID="b0d0b0226d2d68e52b80b8a3819a2b898a59e1d0a34d8edf113602bbdabaaf70" Jan 31 16:53:40 crc kubenswrapper[4769]: I0131 16:53:40.709801 4769 scope.go:117] "RemoveContainer" containerID="f2b74b0817580bbf8dee46673071db4df57036b13648d57687a8891be838e442" Jan 31 16:53:40 crc kubenswrapper[4769]: I0131 16:53:40.709810 4769 scope.go:117] "RemoveContainer" containerID="85171d22ea7aba0d2cc2f3e3c5a1d5588a4dcc13108f639e26ddd04660a8b834" Jan 31 16:53:40 crc kubenswrapper[4769]: E0131 16:53:40.953161 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:53:41 crc kubenswrapper[4769]: I0131 16:53:41.614819 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerStarted","Data":"83d5bd9d99ac6360dc5cdc4e5f3dd5c1beffb6ffbe2ed0a5ff2b925566830ffc"} Jan 31 16:53:41 crc kubenswrapper[4769]: I0131 16:53:41.615884 4769 scope.go:117] "RemoveContainer" containerID="606b945b21b44863bc237f55365cc89ef6ace802b31395f55b3dd5048fc6e1bd" Jan 31 16:53:41 crc kubenswrapper[4769]: I0131 16:53:41.616025 4769 scope.go:117] "RemoveContainer" containerID="b0d0b0226d2d68e52b80b8a3819a2b898a59e1d0a34d8edf113602bbdabaaf70" Jan 31 16:53:41 crc kubenswrapper[4769]: I0131 16:53:41.616210 4769 scope.go:117] "RemoveContainer" containerID="85171d22ea7aba0d2cc2f3e3c5a1d5588a4dcc13108f639e26ddd04660a8b834" Jan 31 16:53:41 crc kubenswrapper[4769]: E0131 16:53:41.616829 4769 
pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:53:41 crc kubenswrapper[4769]: I0131 16:53:41.975775 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-wwjzp" Jan 31 16:53:41 crc kubenswrapper[4769]: I0131 16:53:41.975849 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-wwjzp" Jan 31 16:53:43 crc kubenswrapper[4769]: I0131 16:53:43.041420 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-wwjzp" podUID="5c2354ec-99b5-4d49-b323-8fbc638cd57d" containerName="registry-server" probeResult="failure" output=< Jan 31 16:53:43 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Jan 31 16:53:43 crc kubenswrapper[4769]: > Jan 31 16:53:49 crc kubenswrapper[4769]: I0131 16:53:49.708190 4769 scope.go:117] "RemoveContainer" containerID="e03bfde2874d63f3a16dbfd108c258beb187cba5b2243572007a561bb34f5cc8" Jan 31 16:53:49 crc kubenswrapper[4769]: I0131 16:53:49.708236 4769 scope.go:117] "RemoveContainer" containerID="dd3b22c098cb5ff9324f75488cb8c100817cdb9683a4061e67522bb86b98e242" Jan 31 16:53:49 crc kubenswrapper[4769]: E0131 16:53:49.883196 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:53:50 crc kubenswrapper[4769]: I0131 16:53:50.066163 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/root-account-create-update-jvd9g"] Jan 31 16:53:50 crc kubenswrapper[4769]: I0131 16:53:50.073530 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/root-account-create-update-jvd9g"] Jan 31 16:53:50 crc kubenswrapper[4769]: I0131 16:53:50.699674 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerStarted","Data":"d265c21335904ec04c8f26185f4e269eaf14f174a03a26c659d9c48710dc4dc7"} Jan 31 16:53:50 crc kubenswrapper[4769]: I0131 16:53:50.699975 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:53:50 crc kubenswrapper[4769]: I0131 16:53:50.700416 4769 scope.go:117] "RemoveContainer" containerID="dd3b22c098cb5ff9324f75488cb8c100817cdb9683a4061e67522bb86b98e242" Jan 31 16:53:50 crc kubenswrapper[4769]: E0131 16:53:50.700840 4769 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:53:50 crc kubenswrapper[4769]: I0131 16:53:50.720957 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2425631f-500c-4ce4-9a21-7913d950b573" path="/var/lib/kubelet/pods/2425631f-500c-4ce4-9a21-7913d950b573/volumes" Jan 31 16:53:51 crc kubenswrapper[4769]: I0131 16:53:51.711351 4769 scope.go:117] "RemoveContainer" containerID="dd3b22c098cb5ff9324f75488cb8c100817cdb9683a4061e67522bb86b98e242" Jan 31 16:53:51 crc kubenswrapper[4769]: E0131 16:53:51.712203 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:53:52 crc kubenswrapper[4769]: I0131 16:53:52.034933 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-wwjzp" Jan 31 16:53:52 crc kubenswrapper[4769]: I0131 16:53:52.111001 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-wwjzp" Jan 31 16:53:52 crc kubenswrapper[4769]: I0131 16:53:52.273639 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wwjzp"] Jan 31 16:53:53 crc kubenswrapper[4769]: I0131 16:53:53.724373 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-wwjzp" podUID="5c2354ec-99b5-4d49-b323-8fbc638cd57d" containerName="registry-server" containerID="cri-o://b501b0431b0bd53cd258683c44a84e68ef30c06515bd60e7a64b7d3f97e586e4" gracePeriod=2 Jan 31 16:53:54 crc kubenswrapper[4769]: I0131 16:53:54.170334 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wwjzp" Jan 31 16:53:54 crc kubenswrapper[4769]: I0131 16:53:54.341488 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c2354ec-99b5-4d49-b323-8fbc638cd57d-catalog-content\") pod \"5c2354ec-99b5-4d49-b323-8fbc638cd57d\" (UID: \"5c2354ec-99b5-4d49-b323-8fbc638cd57d\") " Jan 31 16:53:54 crc kubenswrapper[4769]: I0131 16:53:54.341567 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c2354ec-99b5-4d49-b323-8fbc638cd57d-utilities\") pod \"5c2354ec-99b5-4d49-b323-8fbc638cd57d\" (UID: \"5c2354ec-99b5-4d49-b323-8fbc638cd57d\") " Jan 31 16:53:54 crc kubenswrapper[4769]: I0131 16:53:54.341657 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-68wvh\" (UniqueName: \"kubernetes.io/projected/5c2354ec-99b5-4d49-b323-8fbc638cd57d-kube-api-access-68wvh\") pod \"5c2354ec-99b5-4d49-b323-8fbc638cd57d\" (UID: \"5c2354ec-99b5-4d49-b323-8fbc638cd57d\") " Jan 31 16:53:54 crc kubenswrapper[4769]: I0131 16:53:54.342558 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5c2354ec-99b5-4d49-b323-8fbc638cd57d-utilities" (OuterVolumeSpecName: "utilities") pod "5c2354ec-99b5-4d49-b323-8fbc638cd57d" (UID: "5c2354ec-99b5-4d49-b323-8fbc638cd57d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:53:54 crc kubenswrapper[4769]: I0131 16:53:54.347274 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5c2354ec-99b5-4d49-b323-8fbc638cd57d-kube-api-access-68wvh" (OuterVolumeSpecName: "kube-api-access-68wvh") pod "5c2354ec-99b5-4d49-b323-8fbc638cd57d" (UID: "5c2354ec-99b5-4d49-b323-8fbc638cd57d"). InnerVolumeSpecName "kube-api-access-68wvh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:53:54 crc kubenswrapper[4769]: I0131 16:53:54.443253 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c2354ec-99b5-4d49-b323-8fbc638cd57d-utilities\") on node \"crc\" DevicePath \"\"" Jan 31 16:53:54 crc kubenswrapper[4769]: I0131 16:53:54.443285 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-68wvh\" (UniqueName: \"kubernetes.io/projected/5c2354ec-99b5-4d49-b323-8fbc638cd57d-kube-api-access-68wvh\") on node \"crc\" DevicePath \"\"" Jan 31 16:53:54 crc kubenswrapper[4769]: I0131 16:53:54.492422 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5c2354ec-99b5-4d49-b323-8fbc638cd57d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5c2354ec-99b5-4d49-b323-8fbc638cd57d" (UID: "5c2354ec-99b5-4d49-b323-8fbc638cd57d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:53:54 crc kubenswrapper[4769]: I0131 16:53:54.544185 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c2354ec-99b5-4d49-b323-8fbc638cd57d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 31 16:53:54 crc kubenswrapper[4769]: I0131 16:53:54.733317 4769 generic.go:334] "Generic (PLEG): container finished" podID="5c2354ec-99b5-4d49-b323-8fbc638cd57d" containerID="b501b0431b0bd53cd258683c44a84e68ef30c06515bd60e7a64b7d3f97e586e4" exitCode=0 Jan 31 16:53:54 crc kubenswrapper[4769]: I0131 16:53:54.733399 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wwjzp" event={"ID":"5c2354ec-99b5-4d49-b323-8fbc638cd57d","Type":"ContainerDied","Data":"b501b0431b0bd53cd258683c44a84e68ef30c06515bd60e7a64b7d3f97e586e4"} Jan 31 16:53:54 crc kubenswrapper[4769]: I0131 16:53:54.733440 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wwjzp" event={"ID":"5c2354ec-99b5-4d49-b323-8fbc638cd57d","Type":"ContainerDied","Data":"587f86d0e4deeb199ed94a05e078164b2da3febd409ea3bf11bea18cb5d3960d"} Jan 31 16:53:54 crc kubenswrapper[4769]: I0131 16:53:54.733474 4769 scope.go:117] "RemoveContainer" containerID="b501b0431b0bd53cd258683c44a84e68ef30c06515bd60e7a64b7d3f97e586e4" Jan 31 16:53:54 crc kubenswrapper[4769]: I0131 16:53:54.733646 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wwjzp" Jan 31 16:53:54 crc kubenswrapper[4769]: I0131 16:53:54.762795 4769 scope.go:117] "RemoveContainer" containerID="869d0742e57349a24782ad4922dceb965758a85def50ef48b5225474bace4baf" Jan 31 16:53:54 crc kubenswrapper[4769]: I0131 16:53:54.767782 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wwjzp"] Jan 31 16:53:54 crc kubenswrapper[4769]: I0131 16:53:54.773657 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-wwjzp"] Jan 31 16:53:54 crc kubenswrapper[4769]: I0131 16:53:54.783696 4769 scope.go:117] "RemoveContainer" containerID="579c01b0731e92ab798bd5294c3fb45ae42f14b71ed665df6e46f85857069214" Jan 31 16:53:54 crc kubenswrapper[4769]: I0131 16:53:54.823135 4769 scope.go:117] "RemoveContainer" containerID="b501b0431b0bd53cd258683c44a84e68ef30c06515bd60e7a64b7d3f97e586e4" Jan 31 16:53:54 crc kubenswrapper[4769]: E0131 16:53:54.823944 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b501b0431b0bd53cd258683c44a84e68ef30c06515bd60e7a64b7d3f97e586e4\": container with ID starting with b501b0431b0bd53cd258683c44a84e68ef30c06515bd60e7a64b7d3f97e586e4 not found: ID does not exist" containerID="b501b0431b0bd53cd258683c44a84e68ef30c06515bd60e7a64b7d3f97e586e4" Jan 31 16:53:54 crc kubenswrapper[4769]: I0131 16:53:54.824122 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b501b0431b0bd53cd258683c44a84e68ef30c06515bd60e7a64b7d3f97e586e4"} err="failed to get container status \"b501b0431b0bd53cd258683c44a84e68ef30c06515bd60e7a64b7d3f97e586e4\": rpc error: code = NotFound desc = could not find container \"b501b0431b0bd53cd258683c44a84e68ef30c06515bd60e7a64b7d3f97e586e4\": container with ID starting with b501b0431b0bd53cd258683c44a84e68ef30c06515bd60e7a64b7d3f97e586e4 not found: ID does not exist" Jan 31 16:53:54 crc 
kubenswrapper[4769]: I0131 16:53:54.824326 4769 scope.go:117] "RemoveContainer" containerID="869d0742e57349a24782ad4922dceb965758a85def50ef48b5225474bace4baf" Jan 31 16:53:54 crc kubenswrapper[4769]: E0131 16:53:54.824961 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"869d0742e57349a24782ad4922dceb965758a85def50ef48b5225474bace4baf\": container with ID starting with 869d0742e57349a24782ad4922dceb965758a85def50ef48b5225474bace4baf not found: ID does not exist" containerID="869d0742e57349a24782ad4922dceb965758a85def50ef48b5225474bace4baf" Jan 31 16:53:54 crc kubenswrapper[4769]: I0131 16:53:54.825012 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"869d0742e57349a24782ad4922dceb965758a85def50ef48b5225474bace4baf"} err="failed to get container status \"869d0742e57349a24782ad4922dceb965758a85def50ef48b5225474bace4baf\": rpc error: code = NotFound desc = could not find container \"869d0742e57349a24782ad4922dceb965758a85def50ef48b5225474bace4baf\": container with ID starting with 869d0742e57349a24782ad4922dceb965758a85def50ef48b5225474bace4baf not found: ID does not exist" Jan 31 16:53:54 crc kubenswrapper[4769]: I0131 16:53:54.825046 4769 scope.go:117] "RemoveContainer" containerID="579c01b0731e92ab798bd5294c3fb45ae42f14b71ed665df6e46f85857069214" Jan 31 16:53:54 crc kubenswrapper[4769]: E0131 16:53:54.825450 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"579c01b0731e92ab798bd5294c3fb45ae42f14b71ed665df6e46f85857069214\": container with ID starting with 579c01b0731e92ab798bd5294c3fb45ae42f14b71ed665df6e46f85857069214 not found: ID does not exist" containerID="579c01b0731e92ab798bd5294c3fb45ae42f14b71ed665df6e46f85857069214" Jan 31 16:53:54 crc kubenswrapper[4769]: I0131 16:53:54.825483 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"579c01b0731e92ab798bd5294c3fb45ae42f14b71ed665df6e46f85857069214"} err="failed to get container status \"579c01b0731e92ab798bd5294c3fb45ae42f14b71ed665df6e46f85857069214\": rpc error: code = NotFound desc = could not find container \"579c01b0731e92ab798bd5294c3fb45ae42f14b71ed665df6e46f85857069214\": container with ID starting with 579c01b0731e92ab798bd5294c3fb45ae42f14b71ed665df6e46f85857069214 not found: ID does not exist" Jan 31 16:53:56 crc kubenswrapper[4769]: I0131 16:53:56.650302 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:53:56 crc kubenswrapper[4769]: I0131 16:53:56.650674 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:53:56 crc kubenswrapper[4769]: I0131 16:53:56.708231 4769 scope.go:117] "RemoveContainer" containerID="606b945b21b44863bc237f55365cc89ef6ace802b31395f55b3dd5048fc6e1bd" Jan 31 16:53:56 crc kubenswrapper[4769]: I0131 16:53:56.708321 4769 scope.go:117] "RemoveContainer" containerID="b0d0b0226d2d68e52b80b8a3819a2b898a59e1d0a34d8edf113602bbdabaaf70" Jan 31 16:53:56 crc kubenswrapper[4769]: I0131 16:53:56.708455 4769 scope.go:117] "RemoveContainer" 
containerID="85171d22ea7aba0d2cc2f3e3c5a1d5588a4dcc13108f639e26ddd04660a8b834" Jan 31 16:53:56 crc kubenswrapper[4769]: E0131 16:53:56.708950 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:53:56 crc kubenswrapper[4769]: I0131 16:53:56.726369 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5c2354ec-99b5-4d49-b323-8fbc638cd57d" path="/var/lib/kubelet/pods/5c2354ec-99b5-4d49-b323-8fbc638cd57d/volumes" Jan 31 16:53:59 crc kubenswrapper[4769]: I0131 16:53:59.646926 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:54:01 crc kubenswrapper[4769]: I0131 16:54:01.647035 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:54:02 crc kubenswrapper[4769]: I0131 16:54:02.648147 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:54:02 crc kubenswrapper[4769]: I0131 16:54:02.649520 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:54:02 crc kubenswrapper[4769]: I0131 16:54:02.650641 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="proxy-httpd" containerStatusID={"Type":"cri-o","ID":"d265c21335904ec04c8f26185f4e269eaf14f174a03a26c659d9c48710dc4dc7"} pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" containerMessage="Container proxy-httpd failed liveness probe, will be restarted" Jan 31 16:54:02 crc kubenswrapper[4769]: I0131 16:54:02.650774 4769 scope.go:117] "RemoveContainer" containerID="dd3b22c098cb5ff9324f75488cb8c100817cdb9683a4061e67522bb86b98e242" Jan 31 16:54:02 crc kubenswrapper[4769]: I0131 16:54:02.650893 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" containerID="cri-o://d265c21335904ec04c8f26185f4e269eaf14f174a03a26c659d9c48710dc4dc7" gracePeriod=30 Jan 31 16:54:02 crc kubenswrapper[4769]: I0131 16:54:02.652133 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" 
output="HTTP probe failed with statuscode: 503" Jan 31 16:54:02 crc kubenswrapper[4769]: E0131 16:54:02.774203 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:54:02 crc kubenswrapper[4769]: I0131 16:54:02.813709 4769 generic.go:334] "Generic (PLEG): container finished" podID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerID="83d5bd9d99ac6360dc5cdc4e5f3dd5c1beffb6ffbe2ed0a5ff2b925566830ffc" exitCode=1 Jan 31 16:54:02 crc kubenswrapper[4769]: I0131 16:54:02.813791 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerDied","Data":"83d5bd9d99ac6360dc5cdc4e5f3dd5c1beffb6ffbe2ed0a5ff2b925566830ffc"} Jan 31 16:54:02 crc kubenswrapper[4769]: I0131 16:54:02.813831 4769 scope.go:117] "RemoveContainer" containerID="f2b74b0817580bbf8dee46673071db4df57036b13648d57687a8891be838e442" Jan 31 16:54:02 crc kubenswrapper[4769]: I0131 16:54:02.814485 4769 scope.go:117] "RemoveContainer" containerID="606b945b21b44863bc237f55365cc89ef6ace802b31395f55b3dd5048fc6e1bd" Jan 31 16:54:02 crc kubenswrapper[4769]: I0131 16:54:02.814579 4769 scope.go:117] "RemoveContainer" containerID="b0d0b0226d2d68e52b80b8a3819a2b898a59e1d0a34d8edf113602bbdabaaf70" Jan 31 16:54:02 crc kubenswrapper[4769]: I0131 16:54:02.814659 4769 scope.go:117] "RemoveContainer" containerID="83d5bd9d99ac6360dc5cdc4e5f3dd5c1beffb6ffbe2ed0a5ff2b925566830ffc" Jan 31 16:54:02 crc kubenswrapper[4769]: I0131 16:54:02.814685 4769 scope.go:117] "RemoveContainer" containerID="85171d22ea7aba0d2cc2f3e3c5a1d5588a4dcc13108f639e26ddd04660a8b834" Jan 31 16:54:02 crc kubenswrapper[4769]: E0131 16:54:02.814978 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 20s restarting failed container=object-updater pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:54:02 crc kubenswrapper[4769]: I0131 16:54:02.818782 4769 generic.go:334] "Generic (PLEG): container finished" podID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerID="d265c21335904ec04c8f26185f4e269eaf14f174a03a26c659d9c48710dc4dc7" exitCode=0 Jan 31 16:54:02 crc kubenswrapper[4769]: 
I0131 16:54:02.818831 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerDied","Data":"d265c21335904ec04c8f26185f4e269eaf14f174a03a26c659d9c48710dc4dc7"} Jan 31 16:54:02 crc kubenswrapper[4769]: I0131 16:54:02.819351 4769 scope.go:117] "RemoveContainer" containerID="d265c21335904ec04c8f26185f4e269eaf14f174a03a26c659d9c48710dc4dc7" Jan 31 16:54:02 crc kubenswrapper[4769]: I0131 16:54:02.819390 4769 scope.go:117] "RemoveContainer" containerID="dd3b22c098cb5ff9324f75488cb8c100817cdb9683a4061e67522bb86b98e242" Jan 31 16:54:02 crc kubenswrapper[4769]: E0131 16:54:02.819637 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:54:02 crc kubenswrapper[4769]: I0131 16:54:02.868320 4769 scope.go:117] "RemoveContainer" containerID="e03bfde2874d63f3a16dbfd108c258beb187cba5b2243572007a561bb34f5cc8" Jan 31 16:54:03 crc kubenswrapper[4769]: I0131 16:54:03.839943 4769 scope.go:117] "RemoveContainer" containerID="d265c21335904ec04c8f26185f4e269eaf14f174a03a26c659d9c48710dc4dc7" Jan 31 16:54:03 crc kubenswrapper[4769]: I0131 16:54:03.839981 4769 scope.go:117] "RemoveContainer" containerID="dd3b22c098cb5ff9324f75488cb8c100817cdb9683a4061e67522bb86b98e242" Jan 31 16:54:03 crc kubenswrapper[4769]: E0131 16:54:03.840233 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:54:13 crc kubenswrapper[4769]: I0131 16:54:13.469806 4769 scope.go:117] "RemoveContainer" containerID="863c7b93297b0da7036a3d10a284c9ae43b0dbb3879920d5fa8d47e63a6b623b" Jan 31 16:54:14 crc kubenswrapper[4769]: I0131 16:54:14.711835 4769 scope.go:117] "RemoveContainer" containerID="606b945b21b44863bc237f55365cc89ef6ace802b31395f55b3dd5048fc6e1bd" Jan 31 16:54:14 crc kubenswrapper[4769]: I0131 16:54:14.712165 4769 scope.go:117] "RemoveContainer" containerID="b0d0b0226d2d68e52b80b8a3819a2b898a59e1d0a34d8edf113602bbdabaaf70" Jan 31 16:54:14 crc kubenswrapper[4769]: I0131 16:54:14.712237 4769 scope.go:117] "RemoveContainer" containerID="83d5bd9d99ac6360dc5cdc4e5f3dd5c1beffb6ffbe2ed0a5ff2b925566830ffc" Jan 31 16:54:14 crc kubenswrapper[4769]: I0131 16:54:14.712244 4769 scope.go:117] "RemoveContainer" containerID="85171d22ea7aba0d2cc2f3e3c5a1d5588a4dcc13108f639e26ddd04660a8b834" Jan 31 16:54:14 crc kubenswrapper[4769]: E0131 16:54:14.712548 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to 
\"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 20s restarting failed container=object-updater pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:54:18 crc kubenswrapper[4769]: I0131 16:54:18.708236 4769 scope.go:117] "RemoveContainer" containerID="d265c21335904ec04c8f26185f4e269eaf14f174a03a26c659d9c48710dc4dc7" Jan 31 16:54:18 crc kubenswrapper[4769]: I0131 16:54:18.708663 4769 scope.go:117] "RemoveContainer" containerID="dd3b22c098cb5ff9324f75488cb8c100817cdb9683a4061e67522bb86b98e242" Jan 31 16:54:18 crc kubenswrapper[4769]: E0131 16:54:18.708984 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:54:26 crc kubenswrapper[4769]: I0131 16:54:26.709066 4769 scope.go:117] "RemoveContainer" containerID="606b945b21b44863bc237f55365cc89ef6ace802b31395f55b3dd5048fc6e1bd" Jan 31 16:54:26 crc kubenswrapper[4769]: I0131 16:54:26.709909 4769 scope.go:117] "RemoveContainer" containerID="b0d0b0226d2d68e52b80b8a3819a2b898a59e1d0a34d8edf113602bbdabaaf70" Jan 31 16:54:26 crc kubenswrapper[4769]: I0131 16:54:26.710071 4769 scope.go:117] "RemoveContainer" containerID="83d5bd9d99ac6360dc5cdc4e5f3dd5c1beffb6ffbe2ed0a5ff2b925566830ffc" Jan 31 16:54:26 crc kubenswrapper[4769]: I0131 16:54:26.710085 4769 scope.go:117] "RemoveContainer" containerID="85171d22ea7aba0d2cc2f3e3c5a1d5588a4dcc13108f639e26ddd04660a8b834" Jan 31 16:54:26 crc kubenswrapper[4769]: E0131 16:54:26.872340 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" 
podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:54:27 crc kubenswrapper[4769]: I0131 16:54:27.052614 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerStarted","Data":"e1ade552601aec6431d86caf9b8c2913f4682a139a2e153bb125575e94e78d09"} Jan 31 16:54:27 crc kubenswrapper[4769]: I0131 16:54:27.053255 4769 scope.go:117] "RemoveContainer" containerID="606b945b21b44863bc237f55365cc89ef6ace802b31395f55b3dd5048fc6e1bd" Jan 31 16:54:27 crc kubenswrapper[4769]: I0131 16:54:27.053311 4769 scope.go:117] "RemoveContainer" containerID="b0d0b0226d2d68e52b80b8a3819a2b898a59e1d0a34d8edf113602bbdabaaf70" Jan 31 16:54:27 crc kubenswrapper[4769]: I0131 16:54:27.053394 4769 scope.go:117] "RemoveContainer" containerID="85171d22ea7aba0d2cc2f3e3c5a1d5588a4dcc13108f639e26ddd04660a8b834" Jan 31 16:54:27 crc kubenswrapper[4769]: E0131 16:54:27.053646 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:54:30 crc kubenswrapper[4769]: I0131 16:54:30.708756 4769 scope.go:117] "RemoveContainer" containerID="d265c21335904ec04c8f26185f4e269eaf14f174a03a26c659d9c48710dc4dc7" Jan 31 16:54:30 crc kubenswrapper[4769]: I0131 16:54:30.709076 4769 scope.go:117] "RemoveContainer" containerID="dd3b22c098cb5ff9324f75488cb8c100817cdb9683a4061e67522bb86b98e242" Jan 31 16:54:30 crc kubenswrapper[4769]: E0131 16:54:30.709377 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:54:39 crc kubenswrapper[4769]: I0131 16:54:39.708638 4769 scope.go:117] "RemoveContainer" containerID="606b945b21b44863bc237f55365cc89ef6ace802b31395f55b3dd5048fc6e1bd" Jan 31 16:54:39 crc kubenswrapper[4769]: I0131 16:54:39.708967 4769 scope.go:117] "RemoveContainer" containerID="b0d0b0226d2d68e52b80b8a3819a2b898a59e1d0a34d8edf113602bbdabaaf70" Jan 31 16:54:39 crc kubenswrapper[4769]: I0131 16:54:39.709097 4769 scope.go:117] "RemoveContainer" containerID="85171d22ea7aba0d2cc2f3e3c5a1d5588a4dcc13108f639e26ddd04660a8b834" Jan 31 16:54:39 crc kubenswrapper[4769]: E0131 16:54:39.709453 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s 
restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:54:45 crc kubenswrapper[4769]: I0131 16:54:45.708461 4769 scope.go:117] "RemoveContainer" containerID="d265c21335904ec04c8f26185f4e269eaf14f174a03a26c659d9c48710dc4dc7" Jan 31 16:54:45 crc kubenswrapper[4769]: I0131 16:54:45.708995 4769 scope.go:117] "RemoveContainer" containerID="dd3b22c098cb5ff9324f75488cb8c100817cdb9683a4061e67522bb86b98e242" Jan 31 16:54:45 crc kubenswrapper[4769]: E0131 16:54:45.709181 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:54:50 crc kubenswrapper[4769]: I0131 16:54:50.682551 4769 patch_prober.go:28] interesting pod/machine-config-daemon-4bqbm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 31 16:54:50 crc kubenswrapper[4769]: I0131 16:54:50.682657 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 31 16:54:53 crc kubenswrapper[4769]: I0131 16:54:53.710357 4769 scope.go:117] "RemoveContainer" containerID="606b945b21b44863bc237f55365cc89ef6ace802b31395f55b3dd5048fc6e1bd" Jan 31 16:54:53 crc kubenswrapper[4769]: I0131 16:54:53.710965 4769 scope.go:117] "RemoveContainer" containerID="b0d0b0226d2d68e52b80b8a3819a2b898a59e1d0a34d8edf113602bbdabaaf70" Jan 31 16:54:53 crc kubenswrapper[4769]: I0131 16:54:53.711205 4769 scope.go:117] "RemoveContainer" containerID="85171d22ea7aba0d2cc2f3e3c5a1d5588a4dcc13108f639e26ddd04660a8b834" Jan 31 16:54:53 crc kubenswrapper[4769]: E0131 16:54:53.711883 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to 
\"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:54:57 crc kubenswrapper[4769]: I0131 16:54:57.708279 4769 scope.go:117] "RemoveContainer" containerID="d265c21335904ec04c8f26185f4e269eaf14f174a03a26c659d9c48710dc4dc7" Jan 31 16:54:57 crc kubenswrapper[4769]: I0131 16:54:57.709434 4769 scope.go:117] "RemoveContainer" containerID="dd3b22c098cb5ff9324f75488cb8c100817cdb9683a4061e67522bb86b98e242" Jan 31 16:54:57 crc kubenswrapper[4769]: E0131 16:54:57.709737 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:55:04 crc kubenswrapper[4769]: I0131 16:55:04.708814 4769 scope.go:117] "RemoveContainer" containerID="606b945b21b44863bc237f55365cc89ef6ace802b31395f55b3dd5048fc6e1bd" Jan 31 16:55:04 crc kubenswrapper[4769]: I0131 16:55:04.709563 4769 scope.go:117] "RemoveContainer" containerID="b0d0b0226d2d68e52b80b8a3819a2b898a59e1d0a34d8edf113602bbdabaaf70" Jan 31 16:55:04 crc kubenswrapper[4769]: I0131 16:55:04.709812 4769 scope.go:117] "RemoveContainer" containerID="85171d22ea7aba0d2cc2f3e3c5a1d5588a4dcc13108f639e26ddd04660a8b834" Jan 31 16:55:04 crc kubenswrapper[4769]: E0131 16:55:04.710295 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:55:09 crc kubenswrapper[4769]: E0131 16:55:09.224936 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ring-data-devices], unattached volumes=[], failed to process volumes=[]: context deadline exceeded" pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" podUID="54c0116b-a027-4f11-8b6b-aa00778f1acb" Jan 31 16:55:09 crc kubenswrapper[4769]: I0131 16:55:09.441340 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 16:55:10 crc kubenswrapper[4769]: I0131 16:55:10.111398 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices\") pod \"swift-ring-rebalance-2sjs2\" (UID: \"54c0116b-a027-4f11-8b6b-aa00778f1acb\") " pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 16:55:10 crc kubenswrapper[4769]: E0131 16:55:10.111656 4769 configmap.go:193] Couldn't get configMap swift-kuttl-tests/swift-ring-config-data: configmap "swift-ring-config-data" not found Jan 31 16:55:10 crc kubenswrapper[4769]: E0131 16:55:10.111788 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices podName:54c0116b-a027-4f11-8b6b-aa00778f1acb nodeName:}" failed. No retries permitted until 2026-01-31 16:57:12.111759022 +0000 UTC m=+1680.185927731 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices") pod "swift-ring-rebalance-2sjs2" (UID: "54c0116b-a027-4f11-8b6b-aa00778f1acb") : configmap "swift-ring-config-data" not found Jan 31 16:55:11 crc kubenswrapper[4769]: I0131 16:55:11.708444 4769 scope.go:117] "RemoveContainer" containerID="d265c21335904ec04c8f26185f4e269eaf14f174a03a26c659d9c48710dc4dc7" Jan 31 16:55:11 crc kubenswrapper[4769]: I0131 16:55:11.708798 4769 scope.go:117] "RemoveContainer" containerID="dd3b22c098cb5ff9324f75488cb8c100817cdb9683a4061e67522bb86b98e242" Jan 31 16:55:11 crc kubenswrapper[4769]: E0131 16:55:11.709164 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:55:17 crc kubenswrapper[4769]: I0131 16:55:17.709768 4769 scope.go:117] "RemoveContainer" containerID="606b945b21b44863bc237f55365cc89ef6ace802b31395f55b3dd5048fc6e1bd" Jan 31 16:55:17 crc kubenswrapper[4769]: I0131 16:55:17.711768 4769 scope.go:117] "RemoveContainer" containerID="b0d0b0226d2d68e52b80b8a3819a2b898a59e1d0a34d8edf113602bbdabaaf70" Jan 31 16:55:17 crc kubenswrapper[4769]: I0131 16:55:17.712128 4769 scope.go:117] "RemoveContainer" containerID="85171d22ea7aba0d2cc2f3e3c5a1d5588a4dcc13108f639e26ddd04660a8b834" Jan 31 16:55:17 crc kubenswrapper[4769]: E0131 16:55:17.712850 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: 
\"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:55:20 crc kubenswrapper[4769]: I0131 16:55:20.681856 4769 patch_prober.go:28] interesting pod/machine-config-daemon-4bqbm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 31 16:55:20 crc kubenswrapper[4769]: I0131 16:55:20.682321 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 31 16:55:21 crc kubenswrapper[4769]: I0131 16:55:21.552940 4769 generic.go:334] "Generic (PLEG): container finished" podID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerID="d7679f19ae08a5bddb7806b0114bfd77810ac5077e5f109cc699c3738d51d805" exitCode=1 Jan 31 16:55:21 crc kubenswrapper[4769]: I0131 16:55:21.553010 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerDied","Data":"d7679f19ae08a5bddb7806b0114bfd77810ac5077e5f109cc699c3738d51d805"} Jan 31 16:55:21 crc kubenswrapper[4769]: I0131 16:55:21.553071 4769 scope.go:117] "RemoveContainer" containerID="14e13a74ac424327d602e111f611c13f87518391992630077fd51df1b2cb4990" Jan 31 16:55:21 crc kubenswrapper[4769]: I0131 16:55:21.554721 4769 scope.go:117] "RemoveContainer" containerID="606b945b21b44863bc237f55365cc89ef6ace802b31395f55b3dd5048fc6e1bd" Jan 31 16:55:21 crc kubenswrapper[4769]: I0131 16:55:21.554849 4769 scope.go:117] "RemoveContainer" containerID="b0d0b0226d2d68e52b80b8a3819a2b898a59e1d0a34d8edf113602bbdabaaf70" Jan 31 16:55:21 crc kubenswrapper[4769]: I0131 16:55:21.554894 4769 scope.go:117] "RemoveContainer" containerID="d7679f19ae08a5bddb7806b0114bfd77810ac5077e5f109cc699c3738d51d805" Jan 31 16:55:21 crc kubenswrapper[4769]: I0131 16:55:21.555034 4769 scope.go:117] "RemoveContainer" containerID="85171d22ea7aba0d2cc2f3e3c5a1d5588a4dcc13108f639e26ddd04660a8b834" Jan 31 16:55:21 crc kubenswrapper[4769]: E0131 16:55:21.555581 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 20s restarting failed container=container-updater pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:55:22 crc 
kubenswrapper[4769]: I0131 16:55:22.715130 4769 scope.go:117] "RemoveContainer" containerID="d265c21335904ec04c8f26185f4e269eaf14f174a03a26c659d9c48710dc4dc7" Jan 31 16:55:22 crc kubenswrapper[4769]: I0131 16:55:22.716512 4769 scope.go:117] "RemoveContainer" containerID="dd3b22c098cb5ff9324f75488cb8c100817cdb9683a4061e67522bb86b98e242" Jan 31 16:55:22 crc kubenswrapper[4769]: E0131 16:55:22.716892 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:55:33 crc kubenswrapper[4769]: I0131 16:55:33.708752 4769 scope.go:117] "RemoveContainer" containerID="d265c21335904ec04c8f26185f4e269eaf14f174a03a26c659d9c48710dc4dc7" Jan 31 16:55:33 crc kubenswrapper[4769]: I0131 16:55:33.709269 4769 scope.go:117] "RemoveContainer" containerID="dd3b22c098cb5ff9324f75488cb8c100817cdb9683a4061e67522bb86b98e242" Jan 31 16:55:33 crc kubenswrapper[4769]: E0131 16:55:33.710007 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:55:34 crc kubenswrapper[4769]: I0131 16:55:34.708796 4769 scope.go:117] "RemoveContainer" containerID="606b945b21b44863bc237f55365cc89ef6ace802b31395f55b3dd5048fc6e1bd" Jan 31 16:55:34 crc kubenswrapper[4769]: I0131 16:55:34.708895 4769 scope.go:117] "RemoveContainer" containerID="b0d0b0226d2d68e52b80b8a3819a2b898a59e1d0a34d8edf113602bbdabaaf70" Jan 31 16:55:34 crc kubenswrapper[4769]: I0131 16:55:34.708928 4769 scope.go:117] "RemoveContainer" containerID="d7679f19ae08a5bddb7806b0114bfd77810ac5077e5f109cc699c3738d51d805" Jan 31 16:55:34 crc kubenswrapper[4769]: I0131 16:55:34.709014 4769 scope.go:117] "RemoveContainer" containerID="85171d22ea7aba0d2cc2f3e3c5a1d5588a4dcc13108f639e26ddd04660a8b834" Jan 31 16:55:34 crc kubenswrapper[4769]: E0131 16:55:34.709428 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 20s restarting failed container=container-updater pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to 
\"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:55:36 crc kubenswrapper[4769]: I0131 16:55:36.088534 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/keystone-dc33-account-create-update-mlwkh"] Jan 31 16:55:36 crc kubenswrapper[4769]: I0131 16:55:36.096868 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/keystone-db-create-k4978"] Jan 31 16:55:36 crc kubenswrapper[4769]: I0131 16:55:36.103829 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/keystone-dc33-account-create-update-mlwkh"] Jan 31 16:55:36 crc kubenswrapper[4769]: I0131 16:55:36.109324 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/keystone-db-create-k4978"] Jan 31 16:55:36 crc kubenswrapper[4769]: I0131 16:55:36.721161 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7dbac680-3b56-4116-8a9c-e41655dce740" path="/var/lib/kubelet/pods/7dbac680-3b56-4116-8a9c-e41655dce740/volumes" Jan 31 16:55:36 crc kubenswrapper[4769]: I0131 16:55:36.722200 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b38780b5-a260-49f0-853b-5532643cc9c7" path="/var/lib/kubelet/pods/b38780b5-a260-49f0-853b-5532643cc9c7/volumes" Jan 31 16:55:48 crc kubenswrapper[4769]: I0131 16:55:48.709323 4769 scope.go:117] "RemoveContainer" containerID="d265c21335904ec04c8f26185f4e269eaf14f174a03a26c659d9c48710dc4dc7" Jan 31 16:55:48 crc kubenswrapper[4769]: I0131 16:55:48.712287 4769 scope.go:117] "RemoveContainer" containerID="dd3b22c098cb5ff9324f75488cb8c100817cdb9683a4061e67522bb86b98e242" Jan 31 16:55:48 crc kubenswrapper[4769]: E0131 16:55:48.712867 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:55:49 crc kubenswrapper[4769]: I0131 16:55:49.708434 4769 scope.go:117] "RemoveContainer" containerID="606b945b21b44863bc237f55365cc89ef6ace802b31395f55b3dd5048fc6e1bd" Jan 31 16:55:49 crc kubenswrapper[4769]: I0131 16:55:49.709790 4769 scope.go:117] "RemoveContainer" containerID="b0d0b0226d2d68e52b80b8a3819a2b898a59e1d0a34d8edf113602bbdabaaf70" Jan 31 16:55:49 crc kubenswrapper[4769]: I0131 16:55:49.709863 4769 scope.go:117] "RemoveContainer" containerID="d7679f19ae08a5bddb7806b0114bfd77810ac5077e5f109cc699c3738d51d805" Jan 31 16:55:49 crc kubenswrapper[4769]: I0131 16:55:49.710019 4769 scope.go:117] "RemoveContainer" containerID="85171d22ea7aba0d2cc2f3e3c5a1d5588a4dcc13108f639e26ddd04660a8b834" Jan 31 16:55:49 crc kubenswrapper[4769]: E0131 16:55:49.923361 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator 
pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:55:50 crc kubenswrapper[4769]: I0131 16:55:50.681935 4769 patch_prober.go:28] interesting pod/machine-config-daemon-4bqbm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 31 16:55:50 crc kubenswrapper[4769]: I0131 16:55:50.682387 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 31 16:55:50 crc kubenswrapper[4769]: I0131 16:55:50.682462 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" Jan 31 16:55:50 crc kubenswrapper[4769]: I0131 16:55:50.683524 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5953b98da67a838c445c5de169d2507557e859d1573f7aa06c047f960972389b"} pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 31 16:55:50 crc kubenswrapper[4769]: I0131 16:55:50.683668 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" containerID="cri-o://5953b98da67a838c445c5de169d2507557e859d1573f7aa06c047f960972389b" gracePeriod=600 Jan 31 16:55:50 crc kubenswrapper[4769]: E0131 16:55:50.811162 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 16:55:50 crc kubenswrapper[4769]: I0131 16:55:50.827274 4769 generic.go:334] "Generic (PLEG): container finished" podID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerID="5953b98da67a838c445c5de169d2507557e859d1573f7aa06c047f960972389b" exitCode=0 Jan 31 16:55:50 crc kubenswrapper[4769]: I0131 16:55:50.827330 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" event={"ID":"1d352f75-43f7-4b8c-867e-cfb17bbbe011","Type":"ContainerDied","Data":"5953b98da67a838c445c5de169d2507557e859d1573f7aa06c047f960972389b"} Jan 31 16:55:50 crc kubenswrapper[4769]: I0131 16:55:50.827395 4769 scope.go:117] 
"RemoveContainer" containerID="3148c4c5005ab898a2a4ce40c3987f2918283183525c20480b00949fa02629ba" Jan 31 16:55:50 crc kubenswrapper[4769]: I0131 16:55:50.828922 4769 scope.go:117] "RemoveContainer" containerID="5953b98da67a838c445c5de169d2507557e859d1573f7aa06c047f960972389b" Jan 31 16:55:50 crc kubenswrapper[4769]: E0131 16:55:50.829653 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 16:55:50 crc kubenswrapper[4769]: I0131 16:55:50.839350 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerStarted","Data":"4fac2c453da18bc79dc1d78042d47a8e898bcc5b83d4d1fd3e8e4a5a16d17547"} Jan 31 16:55:50 crc kubenswrapper[4769]: I0131 16:55:50.840141 4769 scope.go:117] "RemoveContainer" containerID="606b945b21b44863bc237f55365cc89ef6ace802b31395f55b3dd5048fc6e1bd" Jan 31 16:55:50 crc kubenswrapper[4769]: I0131 16:55:50.840264 4769 scope.go:117] "RemoveContainer" containerID="b0d0b0226d2d68e52b80b8a3819a2b898a59e1d0a34d8edf113602bbdabaaf70" Jan 31 16:55:50 crc kubenswrapper[4769]: I0131 16:55:50.840447 4769 scope.go:117] "RemoveContainer" containerID="85171d22ea7aba0d2cc2f3e3c5a1d5588a4dcc13108f639e26ddd04660a8b834" Jan 31 16:55:50 crc kubenswrapper[4769]: E0131 16:55:50.841276 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:55:54 crc kubenswrapper[4769]: I0131 16:55:54.066152 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/keystone-db-sync-25psv"] Jan 31 16:55:54 crc kubenswrapper[4769]: I0131 16:55:54.076541 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/keystone-db-sync-25psv"] Jan 31 16:55:54 crc kubenswrapper[4769]: I0131 16:55:54.722933 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="70c8524d-a0d7-40ce-8fcf-070cf0348c60" path="/var/lib/kubelet/pods/70c8524d-a0d7-40ce-8fcf-070cf0348c60/volumes" Jan 31 16:56:01 crc kubenswrapper[4769]: I0131 16:56:01.041618 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/keystone-bootstrap-9xtg8"] Jan 31 16:56:01 crc kubenswrapper[4769]: I0131 16:56:01.053315 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/keystone-bootstrap-9xtg8"] Jan 31 16:56:01 crc kubenswrapper[4769]: I0131 16:56:01.708591 4769 scope.go:117] "RemoveContainer" 
containerID="d265c21335904ec04c8f26185f4e269eaf14f174a03a26c659d9c48710dc4dc7" Jan 31 16:56:01 crc kubenswrapper[4769]: I0131 16:56:01.708623 4769 scope.go:117] "RemoveContainer" containerID="dd3b22c098cb5ff9324f75488cb8c100817cdb9683a4061e67522bb86b98e242" Jan 31 16:56:01 crc kubenswrapper[4769]: E0131 16:56:01.708875 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:56:02 crc kubenswrapper[4769]: I0131 16:56:02.717617 4769 scope.go:117] "RemoveContainer" containerID="5953b98da67a838c445c5de169d2507557e859d1573f7aa06c047f960972389b" Jan 31 16:56:02 crc kubenswrapper[4769]: E0131 16:56:02.718006 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 16:56:02 crc kubenswrapper[4769]: I0131 16:56:02.720459 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="36f36d22-0f23-4385-af4a-d31963ab0dbd" path="/var/lib/kubelet/pods/36f36d22-0f23-4385-af4a-d31963ab0dbd/volumes" Jan 31 16:56:04 crc kubenswrapper[4769]: I0131 16:56:04.711917 4769 scope.go:117] "RemoveContainer" containerID="606b945b21b44863bc237f55365cc89ef6ace802b31395f55b3dd5048fc6e1bd" Jan 31 16:56:04 crc kubenswrapper[4769]: I0131 16:56:04.712176 4769 scope.go:117] "RemoveContainer" containerID="b0d0b0226d2d68e52b80b8a3819a2b898a59e1d0a34d8edf113602bbdabaaf70" Jan 31 16:56:04 crc kubenswrapper[4769]: I0131 16:56:04.712380 4769 scope.go:117] "RemoveContainer" containerID="85171d22ea7aba0d2cc2f3e3c5a1d5588a4dcc13108f639e26ddd04660a8b834" Jan 31 16:56:04 crc kubenswrapper[4769]: E0131 16:56:04.712933 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:56:10 crc kubenswrapper[4769]: I0131 16:56:10.048276 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/barbican-a94c-account-create-update-xwnsf"] Jan 31 16:56:10 crc kubenswrapper[4769]: I0131 16:56:10.063238 4769 kubelet.go:2437] "SyncLoop DELETE" 
source="api" pods=["swift-kuttl-tests/barbican-db-create-8hxbg"] Jan 31 16:56:10 crc kubenswrapper[4769]: I0131 16:56:10.073616 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/barbican-a94c-account-create-update-xwnsf"] Jan 31 16:56:10 crc kubenswrapper[4769]: I0131 16:56:10.101167 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/barbican-db-create-8hxbg"] Jan 31 16:56:10 crc kubenswrapper[4769]: I0131 16:56:10.715582 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4467a7c3-e52f-4518-93b4-86d4e355dd29" path="/var/lib/kubelet/pods/4467a7c3-e52f-4518-93b4-86d4e355dd29/volumes" Jan 31 16:56:10 crc kubenswrapper[4769]: I0131 16:56:10.716347 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="958897d4-0af2-49e6-b39e-2a0111da08fa" path="/var/lib/kubelet/pods/958897d4-0af2-49e6-b39e-2a0111da08fa/volumes" Jan 31 16:56:13 crc kubenswrapper[4769]: I0131 16:56:13.582358 4769 scope.go:117] "RemoveContainer" containerID="4c1dd8f9771ca4c3f7f0837f1873760f93f714c33e5dc999e3bf26bdcec47870" Jan 31 16:56:13 crc kubenswrapper[4769]: I0131 16:56:13.607708 4769 scope.go:117] "RemoveContainer" containerID="2ff432476c66bae5134c3131b0251fdf5759c0ffefd1aa82450118befb4e63ec" Jan 31 16:56:13 crc kubenswrapper[4769]: I0131 16:56:13.687991 4769 scope.go:117] "RemoveContainer" containerID="213308783e2a3f64ff3763f966ed828e22e23274f00477b8a4850379f417410e" Jan 31 16:56:13 crc kubenswrapper[4769]: I0131 16:56:13.708789 4769 scope.go:117] "RemoveContainer" containerID="d265c21335904ec04c8f26185f4e269eaf14f174a03a26c659d9c48710dc4dc7" Jan 31 16:56:13 crc kubenswrapper[4769]: I0131 16:56:13.708832 4769 scope.go:117] "RemoveContainer" containerID="dd3b22c098cb5ff9324f75488cb8c100817cdb9683a4061e67522bb86b98e242" Jan 31 16:56:13 crc kubenswrapper[4769]: E0131 16:56:13.709290 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:56:13 crc kubenswrapper[4769]: I0131 16:56:13.718982 4769 scope.go:117] "RemoveContainer" containerID="4cef552941eb1f7a0ea1810009f1879b3c41600428b341a5e1b7f64716105bf4" Jan 31 16:56:13 crc kubenswrapper[4769]: I0131 16:56:13.735243 4769 scope.go:117] "RemoveContainer" containerID="6c49a998918c78603f4614b178c3806f4560fda87e05a1e331e4a5cc280b57f4" Jan 31 16:56:13 crc kubenswrapper[4769]: I0131 16:56:13.762928 4769 scope.go:117] "RemoveContainer" containerID="7946243402cacce4c24d3c3cc781001e1c62bbb1b8f12f155cbdcfcd3855263b" Jan 31 16:56:15 crc kubenswrapper[4769]: I0131 16:56:15.708344 4769 scope.go:117] "RemoveContainer" containerID="5953b98da67a838c445c5de169d2507557e859d1573f7aa06c047f960972389b" Jan 31 16:56:15 crc kubenswrapper[4769]: E0131 16:56:15.708708 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 16:56:17 crc kubenswrapper[4769]: I0131 16:56:17.708644 4769 scope.go:117] "RemoveContainer" containerID="606b945b21b44863bc237f55365cc89ef6ace802b31395f55b3dd5048fc6e1bd" Jan 31 16:56:17 crc kubenswrapper[4769]: I0131 16:56:17.709032 4769 scope.go:117] "RemoveContainer" containerID="b0d0b0226d2d68e52b80b8a3819a2b898a59e1d0a34d8edf113602bbdabaaf70" Jan 31 16:56:17 crc kubenswrapper[4769]: I0131 16:56:17.709170 4769 scope.go:117] "RemoveContainer" containerID="85171d22ea7aba0d2cc2f3e3c5a1d5588a4dcc13108f639e26ddd04660a8b834" Jan 31 16:56:17 crc kubenswrapper[4769]: E0131 16:56:17.709568 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:56:24 crc kubenswrapper[4769]: I0131 16:56:24.710188 4769 scope.go:117] "RemoveContainer" containerID="d265c21335904ec04c8f26185f4e269eaf14f174a03a26c659d9c48710dc4dc7" Jan 31 16:56:24 crc kubenswrapper[4769]: I0131 16:56:24.710557 4769 scope.go:117] "RemoveContainer" containerID="dd3b22c098cb5ff9324f75488cb8c100817cdb9683a4061e67522bb86b98e242" Jan 31 16:56:24 crc kubenswrapper[4769]: E0131 16:56:24.710944 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:56:26 crc kubenswrapper[4769]: I0131 16:56:26.708761 4769 scope.go:117] "RemoveContainer" containerID="5953b98da67a838c445c5de169d2507557e859d1573f7aa06c047f960972389b" Jan 31 16:56:26 crc kubenswrapper[4769]: E0131 16:56:26.709409 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 16:56:29 crc kubenswrapper[4769]: I0131 16:56:29.708527 4769 scope.go:117] "RemoveContainer" containerID="606b945b21b44863bc237f55365cc89ef6ace802b31395f55b3dd5048fc6e1bd" Jan 31 16:56:29 crc kubenswrapper[4769]: I0131 
16:56:29.709001 4769 scope.go:117] "RemoveContainer" containerID="b0d0b0226d2d68e52b80b8a3819a2b898a59e1d0a34d8edf113602bbdabaaf70" Jan 31 16:56:29 crc kubenswrapper[4769]: I0131 16:56:29.709187 4769 scope.go:117] "RemoveContainer" containerID="85171d22ea7aba0d2cc2f3e3c5a1d5588a4dcc13108f639e26ddd04660a8b834" Jan 31 16:56:29 crc kubenswrapper[4769]: E0131 16:56:29.709712 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:56:38 crc kubenswrapper[4769]: I0131 16:56:38.707990 4769 scope.go:117] "RemoveContainer" containerID="d265c21335904ec04c8f26185f4e269eaf14f174a03a26c659d9c48710dc4dc7" Jan 31 16:56:38 crc kubenswrapper[4769]: I0131 16:56:38.708329 4769 scope.go:117] "RemoveContainer" containerID="dd3b22c098cb5ff9324f75488cb8c100817cdb9683a4061e67522bb86b98e242" Jan 31 16:56:38 crc kubenswrapper[4769]: E0131 16:56:38.708578 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:56:39 crc kubenswrapper[4769]: I0131 16:56:39.708305 4769 scope.go:117] "RemoveContainer" containerID="5953b98da67a838c445c5de169d2507557e859d1573f7aa06c047f960972389b" Jan 31 16:56:39 crc kubenswrapper[4769]: E0131 16:56:39.708759 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 16:56:44 crc kubenswrapper[4769]: I0131 16:56:44.710093 4769 scope.go:117] "RemoveContainer" containerID="606b945b21b44863bc237f55365cc89ef6ace802b31395f55b3dd5048fc6e1bd" Jan 31 16:56:44 crc kubenswrapper[4769]: I0131 16:56:44.710563 4769 scope.go:117] "RemoveContainer" containerID="b0d0b0226d2d68e52b80b8a3819a2b898a59e1d0a34d8edf113602bbdabaaf70" Jan 31 16:56:44 crc kubenswrapper[4769]: I0131 16:56:44.710700 4769 scope.go:117] "RemoveContainer" containerID="85171d22ea7aba0d2cc2f3e3c5a1d5588a4dcc13108f639e26ddd04660a8b834" Jan 31 16:56:44 crc kubenswrapper[4769]: E0131 16:56:44.711101 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to 
\"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:56:50 crc kubenswrapper[4769]: I0131 16:56:50.708626 4769 scope.go:117] "RemoveContainer" containerID="d265c21335904ec04c8f26185f4e269eaf14f174a03a26c659d9c48710dc4dc7" Jan 31 16:56:50 crc kubenswrapper[4769]: I0131 16:56:50.709017 4769 scope.go:117] "RemoveContainer" containerID="dd3b22c098cb5ff9324f75488cb8c100817cdb9683a4061e67522bb86b98e242" Jan 31 16:56:50 crc kubenswrapper[4769]: E0131 16:56:50.709490 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:56:51 crc kubenswrapper[4769]: I0131 16:56:51.707819 4769 scope.go:117] "RemoveContainer" containerID="5953b98da67a838c445c5de169d2507557e859d1573f7aa06c047f960972389b" Jan 31 16:56:51 crc kubenswrapper[4769]: E0131 16:56:51.708332 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 16:56:56 crc kubenswrapper[4769]: I0131 16:56:56.709340 4769 scope.go:117] "RemoveContainer" containerID="606b945b21b44863bc237f55365cc89ef6ace802b31395f55b3dd5048fc6e1bd" Jan 31 16:56:56 crc kubenswrapper[4769]: I0131 16:56:56.711245 4769 scope.go:117] "RemoveContainer" containerID="b0d0b0226d2d68e52b80b8a3819a2b898a59e1d0a34d8edf113602bbdabaaf70" Jan 31 16:56:56 crc kubenswrapper[4769]: I0131 16:56:56.711619 4769 scope.go:117] "RemoveContainer" containerID="85171d22ea7aba0d2cc2f3e3c5a1d5588a4dcc13108f639e26ddd04660a8b834" Jan 31 16:56:56 crc kubenswrapper[4769]: E0131 16:56:56.712404 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\", failed to 
\"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" Jan 31 16:56:57 crc kubenswrapper[4769]: I0131 16:56:57.347759 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-storage-0"] Jan 31 16:56:57 crc kubenswrapper[4769]: I0131 16:56:57.396479 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="account-server" containerID="cri-o://fb8cd1b7a56a25fb1e03e6e444d2dbacf0780faf3d8442e84148d0d3903f3a0b" gracePeriod=30 Jan 31 16:56:57 crc kubenswrapper[4769]: I0131 16:56:57.396591 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="object-updater" containerID="cri-o://e1ade552601aec6431d86caf9b8c2913f4682a139a2e153bb125575e94e78d09" gracePeriod=30 Jan 31 16:56:57 crc kubenswrapper[4769]: I0131 16:56:57.396611 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="object-auditor" containerID="cri-o://8b402ba867fc3e1b8aeec82352ec3d1add5d3e9c83be0f8ac71af1060a36bf0b" gracePeriod=30 Jan 31 16:56:57 crc kubenswrapper[4769]: I0131 16:56:57.396671 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="object-replicator" containerID="cri-o://93c8011f29f9ae3f1996f20440683ea8bb6cc88c5be0ef6bbbaacd6928db68c5" gracePeriod=30 Jan 31 16:56:57 crc kubenswrapper[4769]: I0131 16:56:57.396745 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="object-server" containerID="cri-o://4e075c22c49a5afeea376a661a368be4d293c9991ddde3126f346b830af8b432" gracePeriod=30 Jan 31 16:56:57 crc kubenswrapper[4769]: I0131 16:56:57.396812 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="rsync" containerID="cri-o://46b7e6f3d2ee35bb4d2c43646e7b5a993ec4c3547e917bdc2031fe9df98a4af2" gracePeriod=30 Jan 31 16:56:57 crc kubenswrapper[4769]: I0131 16:56:57.396798 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="swift-recon-cron" containerID="cri-o://ae4b76720e39c2c27b201ebc5d00611354fa2b33e6e7091e3d3a7bfb26b1fbb0" gracePeriod=30 Jan 31 16:56:57 crc kubenswrapper[4769]: I0131 16:56:57.396881 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="container-updater" containerID="cri-o://4fac2c453da18bc79dc1d78042d47a8e898bcc5b83d4d1fd3e8e4a5a16d17547" gracePeriod=30 Jan 31 16:56:57 crc kubenswrapper[4769]: I0131 16:56:57.396924 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" 
containerName="account-reaper" containerID="cri-o://bcfb5448de1e5529813dca9b7928a45f7bd23f656a2e7b5b2d7b49d9a93b15fe" gracePeriod=30 Jan 31 16:56:57 crc kubenswrapper[4769]: I0131 16:56:57.396950 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="account-auditor" containerID="cri-o://4ee56ae49386474b93f4a54b1463bfaa4ac6f1b6153c68ae08ea5286ac683a49" gracePeriod=30 Jan 31 16:56:57 crc kubenswrapper[4769]: I0131 16:56:57.396968 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="container-server" containerID="cri-o://e4db49b800be05015d3c4a037e1e3cbf3907f6f78aff0523dd56386753181050" gracePeriod=30 Jan 31 16:56:57 crc kubenswrapper[4769]: I0131 16:56:57.396792 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-storage-0" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="container-auditor" containerID="cri-o://e42277a545765b43f10b0536804f3468c53b03a1a33d27e1a80dd6f67e1f3a5b" gracePeriod=30 Jan 31 16:56:58 crc kubenswrapper[4769]: E0131 16:56:58.067354 4769 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2d4b411e_0d38_4e04_a5d5_dfda91ec6ebc.slice/crio-conmon-fb8cd1b7a56a25fb1e03e6e444d2dbacf0780faf3d8442e84148d0d3903f3a0b.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2d4b411e_0d38_4e04_a5d5_dfda91ec6ebc.slice/crio-fb8cd1b7a56a25fb1e03e6e444d2dbacf0780faf3d8442e84148d0d3903f3a0b.scope\": RecentStats: unable to find data in memory cache]" Jan 31 16:56:58 crc kubenswrapper[4769]: I0131 16:56:58.411203 4769 generic.go:334] "Generic (PLEG): container finished" podID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerID="4fac2c453da18bc79dc1d78042d47a8e898bcc5b83d4d1fd3e8e4a5a16d17547" exitCode=0 Jan 31 16:56:58 crc kubenswrapper[4769]: I0131 16:56:58.411543 4769 generic.go:334] "Generic (PLEG): container finished" podID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerID="e1ade552601aec6431d86caf9b8c2913f4682a139a2e153bb125575e94e78d09" exitCode=0 Jan 31 16:56:58 crc kubenswrapper[4769]: I0131 16:56:58.411558 4769 generic.go:334] "Generic (PLEG): container finished" podID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerID="46b7e6f3d2ee35bb4d2c43646e7b5a993ec4c3547e917bdc2031fe9df98a4af2" exitCode=0 Jan 31 16:56:58 crc kubenswrapper[4769]: I0131 16:56:58.411568 4769 generic.go:334] "Generic (PLEG): container finished" podID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerID="8b402ba867fc3e1b8aeec82352ec3d1add5d3e9c83be0f8ac71af1060a36bf0b" exitCode=0 Jan 31 16:56:58 crc kubenswrapper[4769]: I0131 16:56:58.411577 4769 generic.go:334] "Generic (PLEG): container finished" podID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerID="93c8011f29f9ae3f1996f20440683ea8bb6cc88c5be0ef6bbbaacd6928db68c5" exitCode=0 Jan 31 16:56:58 crc kubenswrapper[4769]: I0131 16:56:58.411585 4769 generic.go:334] "Generic (PLEG): container finished" podID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerID="4e075c22c49a5afeea376a661a368be4d293c9991ddde3126f346b830af8b432" exitCode=0 Jan 31 16:56:58 crc kubenswrapper[4769]: I0131 16:56:58.411592 4769 generic.go:334] "Generic (PLEG): container finished" 
podID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerID="e42277a545765b43f10b0536804f3468c53b03a1a33d27e1a80dd6f67e1f3a5b" exitCode=0 Jan 31 16:56:58 crc kubenswrapper[4769]: I0131 16:56:58.411600 4769 generic.go:334] "Generic (PLEG): container finished" podID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerID="e4db49b800be05015d3c4a037e1e3cbf3907f6f78aff0523dd56386753181050" exitCode=0 Jan 31 16:56:58 crc kubenswrapper[4769]: I0131 16:56:58.411608 4769 generic.go:334] "Generic (PLEG): container finished" podID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerID="bcfb5448de1e5529813dca9b7928a45f7bd23f656a2e7b5b2d7b49d9a93b15fe" exitCode=0 Jan 31 16:56:58 crc kubenswrapper[4769]: I0131 16:56:58.411425 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerDied","Data":"4fac2c453da18bc79dc1d78042d47a8e898bcc5b83d4d1fd3e8e4a5a16d17547"} Jan 31 16:56:58 crc kubenswrapper[4769]: I0131 16:56:58.411617 4769 generic.go:334] "Generic (PLEG): container finished" podID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerID="4ee56ae49386474b93f4a54b1463bfaa4ac6f1b6153c68ae08ea5286ac683a49" exitCode=0 Jan 31 16:56:58 crc kubenswrapper[4769]: I0131 16:56:58.411646 4769 generic.go:334] "Generic (PLEG): container finished" podID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerID="fb8cd1b7a56a25fb1e03e6e444d2dbacf0780faf3d8442e84148d0d3903f3a0b" exitCode=0 Jan 31 16:56:58 crc kubenswrapper[4769]: I0131 16:56:58.411649 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerDied","Data":"e1ade552601aec6431d86caf9b8c2913f4682a139a2e153bb125575e94e78d09"} Jan 31 16:56:58 crc kubenswrapper[4769]: I0131 16:56:58.411669 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerDied","Data":"46b7e6f3d2ee35bb4d2c43646e7b5a993ec4c3547e917bdc2031fe9df98a4af2"} Jan 31 16:56:58 crc kubenswrapper[4769]: I0131 16:56:58.411681 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerDied","Data":"8b402ba867fc3e1b8aeec82352ec3d1add5d3e9c83be0f8ac71af1060a36bf0b"} Jan 31 16:56:58 crc kubenswrapper[4769]: I0131 16:56:58.411692 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerDied","Data":"93c8011f29f9ae3f1996f20440683ea8bb6cc88c5be0ef6bbbaacd6928db68c5"} Jan 31 16:56:58 crc kubenswrapper[4769]: I0131 16:56:58.411702 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerDied","Data":"4e075c22c49a5afeea376a661a368be4d293c9991ddde3126f346b830af8b432"} Jan 31 16:56:58 crc kubenswrapper[4769]: I0131 16:56:58.411712 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerDied","Data":"e42277a545765b43f10b0536804f3468c53b03a1a33d27e1a80dd6f67e1f3a5b"} Jan 31 16:56:58 crc kubenswrapper[4769]: I0131 16:56:58.411722 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" 
event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerDied","Data":"e4db49b800be05015d3c4a037e1e3cbf3907f6f78aff0523dd56386753181050"} Jan 31 16:56:58 crc kubenswrapper[4769]: I0131 16:56:58.411733 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerDied","Data":"bcfb5448de1e5529813dca9b7928a45f7bd23f656a2e7b5b2d7b49d9a93b15fe"} Jan 31 16:56:58 crc kubenswrapper[4769]: I0131 16:56:58.411745 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerDied","Data":"4ee56ae49386474b93f4a54b1463bfaa4ac6f1b6153c68ae08ea5286ac683a49"} Jan 31 16:56:58 crc kubenswrapper[4769]: I0131 16:56:58.411756 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerDied","Data":"fb8cd1b7a56a25fb1e03e6e444d2dbacf0780faf3d8442e84148d0d3903f3a0b"} Jan 31 16:56:58 crc kubenswrapper[4769]: I0131 16:56:58.411740 4769 scope.go:117] "RemoveContainer" containerID="d7679f19ae08a5bddb7806b0114bfd77810ac5077e5f109cc699c3738d51d805" Jan 31 16:56:58 crc kubenswrapper[4769]: I0131 16:56:58.434355 4769 scope.go:117] "RemoveContainer" containerID="83d5bd9d99ac6360dc5cdc4e5f3dd5c1beffb6ffbe2ed0a5ff2b925566830ffc" Jan 31 16:57:04 crc kubenswrapper[4769]: I0131 16:57:04.709485 4769 scope.go:117] "RemoveContainer" containerID="d265c21335904ec04c8f26185f4e269eaf14f174a03a26c659d9c48710dc4dc7" Jan 31 16:57:04 crc kubenswrapper[4769]: I0131 16:57:04.709843 4769 scope.go:117] "RemoveContainer" containerID="dd3b22c098cb5ff9324f75488cb8c100817cdb9683a4061e67522bb86b98e242" Jan 31 16:57:04 crc kubenswrapper[4769]: E0131 16:57:04.710357 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:57:06 crc kubenswrapper[4769]: I0131 16:57:06.709083 4769 scope.go:117] "RemoveContainer" containerID="5953b98da67a838c445c5de169d2507557e859d1573f7aa06c047f960972389b" Jan 31 16:57:06 crc kubenswrapper[4769]: E0131 16:57:06.709671 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 16:57:12 crc kubenswrapper[4769]: I0131 16:57:12.115554 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices\") pod \"swift-ring-rebalance-2sjs2\" (UID: \"54c0116b-a027-4f11-8b6b-aa00778f1acb\") " pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 16:57:12 crc 
kubenswrapper[4769]: E0131 16:57:12.115775 4769 configmap.go:193] Couldn't get configMap swift-kuttl-tests/swift-ring-config-data: configmap "swift-ring-config-data" not found Jan 31 16:57:12 crc kubenswrapper[4769]: E0131 16:57:12.116089 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices podName:54c0116b-a027-4f11-8b6b-aa00778f1acb nodeName:}" failed. No retries permitted until 2026-01-31 16:59:14.116061871 +0000 UTC m=+1802.190230570 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices") pod "swift-ring-rebalance-2sjs2" (UID: "54c0116b-a027-4f11-8b6b-aa00778f1acb") : configmap "swift-ring-config-data" not found Jan 31 16:57:12 crc kubenswrapper[4769]: E0131 16:57:12.443600 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ring-data-devices], unattached volumes=[], failed to process volumes=[]: context deadline exceeded" pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" podUID="54c0116b-a027-4f11-8b6b-aa00778f1acb" Jan 31 16:57:12 crc kubenswrapper[4769]: I0131 16:57:12.550814 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 16:57:15 crc kubenswrapper[4769]: I0131 16:57:15.708199 4769 scope.go:117] "RemoveContainer" containerID="d265c21335904ec04c8f26185f4e269eaf14f174a03a26c659d9c48710dc4dc7" Jan 31 16:57:15 crc kubenswrapper[4769]: I0131 16:57:15.708465 4769 scope.go:117] "RemoveContainer" containerID="dd3b22c098cb5ff9324f75488cb8c100817cdb9683a4061e67522bb86b98e242" Jan 31 16:57:15 crc kubenswrapper[4769]: E0131 16:57:15.708855 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:57:20 crc kubenswrapper[4769]: I0131 16:57:20.708319 4769 scope.go:117] "RemoveContainer" containerID="5953b98da67a838c445c5de169d2507557e859d1573f7aa06c047f960972389b" Jan 31 16:57:20 crc kubenswrapper[4769]: E0131 16:57:20.709093 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 16:57:26 crc kubenswrapper[4769]: I0131 16:57:26.707837 4769 scope.go:117] "RemoveContainer" containerID="d265c21335904ec04c8f26185f4e269eaf14f174a03a26c659d9c48710dc4dc7" Jan 31 16:57:26 crc kubenswrapper[4769]: I0131 16:57:26.708105 4769 scope.go:117] "RemoveContainer" containerID="dd3b22c098cb5ff9324f75488cb8c100817cdb9683a4061e67522bb86b98e242" Jan 31 16:57:26 crc kubenswrapper[4769]: E0131 16:57:26.708311 4769 pod_workers.go:1301] "Error syncing pod, skipping" 
err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:57:27 crc kubenswrapper[4769]: I0131 16:57:27.728105 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-storage-0" Jan 31 16:57:27 crc kubenswrapper[4769]: I0131 16:57:27.823245 4769 generic.go:334] "Generic (PLEG): container finished" podID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerID="ae4b76720e39c2c27b201ebc5d00611354fa2b33e6e7091e3d3a7bfb26b1fbb0" exitCode=137 Jan 31 16:57:27 crc kubenswrapper[4769]: I0131 16:57:27.823289 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerDied","Data":"ae4b76720e39c2c27b201ebc5d00611354fa2b33e6e7091e3d3a7bfb26b1fbb0"} Jan 31 16:57:27 crc kubenswrapper[4769]: I0131 16:57:27.823318 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc","Type":"ContainerDied","Data":"a2690a6a2281cbd47c5d2d20cd8ab0432f06ed91625c342732d742c7207158c1"} Jan 31 16:57:27 crc kubenswrapper[4769]: I0131 16:57:27.823336 4769 scope.go:117] "RemoveContainer" containerID="4fac2c453da18bc79dc1d78042d47a8e898bcc5b83d4d1fd3e8e4a5a16d17547" Jan 31 16:57:27 crc kubenswrapper[4769]: I0131 16:57:27.823574 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-storage-0" Jan 31 16:57:27 crc kubenswrapper[4769]: I0131 16:57:27.839453 4769 scope.go:117] "RemoveContainer" containerID="e1ade552601aec6431d86caf9b8c2913f4682a139a2e153bb125575e94e78d09" Jan 31 16:57:27 crc kubenswrapper[4769]: I0131 16:57:27.853804 4769 scope.go:117] "RemoveContainer" containerID="85171d22ea7aba0d2cc2f3e3c5a1d5588a4dcc13108f639e26ddd04660a8b834" Jan 31 16:57:27 crc kubenswrapper[4769]: I0131 16:57:27.866811 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cvlwz\" (UniqueName: \"kubernetes.io/projected/2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc-kube-api-access-cvlwz\") pod \"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc\" (UID: \"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc\") " Jan 31 16:57:27 crc kubenswrapper[4769]: I0131 16:57:27.866885 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swift\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc\" (UID: \"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc\") " Jan 31 16:57:27 crc kubenswrapper[4769]: I0131 16:57:27.866995 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc-lock\") pod \"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc\" (UID: \"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc\") " Jan 31 16:57:27 crc kubenswrapper[4769]: I0131 16:57:27.867071 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc-etc-swift\") pod \"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc\" (UID: \"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc\") " Jan 31 16:57:27 crc kubenswrapper[4769]: I0131 16:57:27.867136 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc-cache\") pod \"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc\" (UID: \"2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc\") " Jan 31 16:57:27 crc kubenswrapper[4769]: I0131 16:57:27.867383 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc-lock" (OuterVolumeSpecName: "lock") pod "2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" (UID: "2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc"). InnerVolumeSpecName "lock". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:57:27 crc kubenswrapper[4769]: I0131 16:57:27.867770 4769 reconciler_common.go:293] "Volume detached for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc-lock\") on node \"crc\" DevicePath \"\"" Jan 31 16:57:27 crc kubenswrapper[4769]: I0131 16:57:27.868120 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc-cache" (OuterVolumeSpecName: "cache") pod "2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" (UID: "2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc"). InnerVolumeSpecName "cache". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 16:57:27 crc kubenswrapper[4769]: I0131 16:57:27.871523 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" (UID: "2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc"). 
InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:57:27 crc kubenswrapper[4769]: I0131 16:57:27.871639 4769 scope.go:117] "RemoveContainer" containerID="b0d0b0226d2d68e52b80b8a3819a2b898a59e1d0a34d8edf113602bbdabaaf70" Jan 31 16:57:27 crc kubenswrapper[4769]: I0131 16:57:27.873197 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc-kube-api-access-cvlwz" (OuterVolumeSpecName: "kube-api-access-cvlwz") pod "2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" (UID: "2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc"). InnerVolumeSpecName "kube-api-access-cvlwz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 16:57:27 crc kubenswrapper[4769]: I0131 16:57:27.873195 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "swift") pod "2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" (UID: "2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc"). InnerVolumeSpecName "local-storage11-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 31 16:57:27 crc kubenswrapper[4769]: I0131 16:57:27.918882 4769 scope.go:117] "RemoveContainer" containerID="606b945b21b44863bc237f55365cc89ef6ace802b31395f55b3dd5048fc6e1bd" Jan 31 16:57:27 crc kubenswrapper[4769]: I0131 16:57:27.938426 4769 scope.go:117] "RemoveContainer" containerID="ae4b76720e39c2c27b201ebc5d00611354fa2b33e6e7091e3d3a7bfb26b1fbb0" Jan 31 16:57:27 crc kubenswrapper[4769]: I0131 16:57:27.960341 4769 scope.go:117] "RemoveContainer" containerID="46b7e6f3d2ee35bb4d2c43646e7b5a993ec4c3547e917bdc2031fe9df98a4af2" Jan 31 16:57:27 crc kubenswrapper[4769]: I0131 16:57:27.969816 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cvlwz\" (UniqueName: \"kubernetes.io/projected/2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc-kube-api-access-cvlwz\") on node \"crc\" DevicePath \"\"" Jan 31 16:57:27 crc kubenswrapper[4769]: I0131 16:57:27.969864 4769 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" " Jan 31 16:57:27 crc kubenswrapper[4769]: I0131 16:57:27.969880 4769 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 31 16:57:27 crc kubenswrapper[4769]: I0131 16:57:27.969893 4769 reconciler_common.go:293] "Volume detached for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc-cache\") on node \"crc\" DevicePath \"\"" Jan 31 16:57:27 crc kubenswrapper[4769]: I0131 16:57:27.977853 4769 scope.go:117] "RemoveContainer" containerID="8b402ba867fc3e1b8aeec82352ec3d1add5d3e9c83be0f8ac71af1060a36bf0b" Jan 31 16:57:27 crc kubenswrapper[4769]: I0131 16:57:27.983253 4769 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc" Jan 31 16:57:27 crc kubenswrapper[4769]: I0131 16:57:27.993006 4769 scope.go:117] "RemoveContainer" containerID="93c8011f29f9ae3f1996f20440683ea8bb6cc88c5be0ef6bbbaacd6928db68c5" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.011457 4769 scope.go:117] "RemoveContainer" containerID="4e075c22c49a5afeea376a661a368be4d293c9991ddde3126f346b830af8b432" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.026467 
4769 scope.go:117] "RemoveContainer" containerID="e42277a545765b43f10b0536804f3468c53b03a1a33d27e1a80dd6f67e1f3a5b" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.043862 4769 scope.go:117] "RemoveContainer" containerID="e4db49b800be05015d3c4a037e1e3cbf3907f6f78aff0523dd56386753181050" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.057660 4769 scope.go:117] "RemoveContainer" containerID="bcfb5448de1e5529813dca9b7928a45f7bd23f656a2e7b5b2d7b49d9a93b15fe" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.071242 4769 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\"" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.079095 4769 scope.go:117] "RemoveContainer" containerID="4ee56ae49386474b93f4a54b1463bfaa4ac6f1b6153c68ae08ea5286ac683a49" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.097288 4769 scope.go:117] "RemoveContainer" containerID="fb8cd1b7a56a25fb1e03e6e444d2dbacf0780faf3d8442e84148d0d3903f3a0b" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.113112 4769 scope.go:117] "RemoveContainer" containerID="4fac2c453da18bc79dc1d78042d47a8e898bcc5b83d4d1fd3e8e4a5a16d17547" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.113597 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4fac2c453da18bc79dc1d78042d47a8e898bcc5b83d4d1fd3e8e4a5a16d17547\": container with ID starting with 4fac2c453da18bc79dc1d78042d47a8e898bcc5b83d4d1fd3e8e4a5a16d17547 not found: ID does not exist" containerID="4fac2c453da18bc79dc1d78042d47a8e898bcc5b83d4d1fd3e8e4a5a16d17547" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.113634 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4fac2c453da18bc79dc1d78042d47a8e898bcc5b83d4d1fd3e8e4a5a16d17547"} err="failed to get container status \"4fac2c453da18bc79dc1d78042d47a8e898bcc5b83d4d1fd3e8e4a5a16d17547\": rpc error: code = NotFound desc = could not find container \"4fac2c453da18bc79dc1d78042d47a8e898bcc5b83d4d1fd3e8e4a5a16d17547\": container with ID starting with 4fac2c453da18bc79dc1d78042d47a8e898bcc5b83d4d1fd3e8e4a5a16d17547 not found: ID does not exist" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.113661 4769 scope.go:117] "RemoveContainer" containerID="e1ade552601aec6431d86caf9b8c2913f4682a139a2e153bb125575e94e78d09" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.113965 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e1ade552601aec6431d86caf9b8c2913f4682a139a2e153bb125575e94e78d09\": container with ID starting with e1ade552601aec6431d86caf9b8c2913f4682a139a2e153bb125575e94e78d09 not found: ID does not exist" containerID="e1ade552601aec6431d86caf9b8c2913f4682a139a2e153bb125575e94e78d09" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.114041 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e1ade552601aec6431d86caf9b8c2913f4682a139a2e153bb125575e94e78d09"} err="failed to get container status \"e1ade552601aec6431d86caf9b8c2913f4682a139a2e153bb125575e94e78d09\": rpc error: code = NotFound desc = could not find container \"e1ade552601aec6431d86caf9b8c2913f4682a139a2e153bb125575e94e78d09\": container with ID starting with e1ade552601aec6431d86caf9b8c2913f4682a139a2e153bb125575e94e78d09 not found: ID does not exist" Jan 
31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.114069 4769 scope.go:117] "RemoveContainer" containerID="85171d22ea7aba0d2cc2f3e3c5a1d5588a4dcc13108f639e26ddd04660a8b834" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.114626 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"85171d22ea7aba0d2cc2f3e3c5a1d5588a4dcc13108f639e26ddd04660a8b834\": container with ID starting with 85171d22ea7aba0d2cc2f3e3c5a1d5588a4dcc13108f639e26ddd04660a8b834 not found: ID does not exist" containerID="85171d22ea7aba0d2cc2f3e3c5a1d5588a4dcc13108f639e26ddd04660a8b834" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.114662 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"85171d22ea7aba0d2cc2f3e3c5a1d5588a4dcc13108f639e26ddd04660a8b834"} err="failed to get container status \"85171d22ea7aba0d2cc2f3e3c5a1d5588a4dcc13108f639e26ddd04660a8b834\": rpc error: code = NotFound desc = could not find container \"85171d22ea7aba0d2cc2f3e3c5a1d5588a4dcc13108f639e26ddd04660a8b834\": container with ID starting with 85171d22ea7aba0d2cc2f3e3c5a1d5588a4dcc13108f639e26ddd04660a8b834 not found: ID does not exist" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.114708 4769 scope.go:117] "RemoveContainer" containerID="b0d0b0226d2d68e52b80b8a3819a2b898a59e1d0a34d8edf113602bbdabaaf70" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.114955 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b0d0b0226d2d68e52b80b8a3819a2b898a59e1d0a34d8edf113602bbdabaaf70\": container with ID starting with b0d0b0226d2d68e52b80b8a3819a2b898a59e1d0a34d8edf113602bbdabaaf70 not found: ID does not exist" containerID="b0d0b0226d2d68e52b80b8a3819a2b898a59e1d0a34d8edf113602bbdabaaf70" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.115121 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b0d0b0226d2d68e52b80b8a3819a2b898a59e1d0a34d8edf113602bbdabaaf70"} err="failed to get container status \"b0d0b0226d2d68e52b80b8a3819a2b898a59e1d0a34d8edf113602bbdabaaf70\": rpc error: code = NotFound desc = could not find container \"b0d0b0226d2d68e52b80b8a3819a2b898a59e1d0a34d8edf113602bbdabaaf70\": container with ID starting with b0d0b0226d2d68e52b80b8a3819a2b898a59e1d0a34d8edf113602bbdabaaf70 not found: ID does not exist" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.115140 4769 scope.go:117] "RemoveContainer" containerID="606b945b21b44863bc237f55365cc89ef6ace802b31395f55b3dd5048fc6e1bd" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.115441 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"606b945b21b44863bc237f55365cc89ef6ace802b31395f55b3dd5048fc6e1bd\": container with ID starting with 606b945b21b44863bc237f55365cc89ef6ace802b31395f55b3dd5048fc6e1bd not found: ID does not exist" containerID="606b945b21b44863bc237f55365cc89ef6ace802b31395f55b3dd5048fc6e1bd" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.115467 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"606b945b21b44863bc237f55365cc89ef6ace802b31395f55b3dd5048fc6e1bd"} err="failed to get container status \"606b945b21b44863bc237f55365cc89ef6ace802b31395f55b3dd5048fc6e1bd\": rpc error: code = NotFound desc = could not find container 
\"606b945b21b44863bc237f55365cc89ef6ace802b31395f55b3dd5048fc6e1bd\": container with ID starting with 606b945b21b44863bc237f55365cc89ef6ace802b31395f55b3dd5048fc6e1bd not found: ID does not exist" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.115486 4769 scope.go:117] "RemoveContainer" containerID="ae4b76720e39c2c27b201ebc5d00611354fa2b33e6e7091e3d3a7bfb26b1fbb0" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.115750 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ae4b76720e39c2c27b201ebc5d00611354fa2b33e6e7091e3d3a7bfb26b1fbb0\": container with ID starting with ae4b76720e39c2c27b201ebc5d00611354fa2b33e6e7091e3d3a7bfb26b1fbb0 not found: ID does not exist" containerID="ae4b76720e39c2c27b201ebc5d00611354fa2b33e6e7091e3d3a7bfb26b1fbb0" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.115777 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ae4b76720e39c2c27b201ebc5d00611354fa2b33e6e7091e3d3a7bfb26b1fbb0"} err="failed to get container status \"ae4b76720e39c2c27b201ebc5d00611354fa2b33e6e7091e3d3a7bfb26b1fbb0\": rpc error: code = NotFound desc = could not find container \"ae4b76720e39c2c27b201ebc5d00611354fa2b33e6e7091e3d3a7bfb26b1fbb0\": container with ID starting with ae4b76720e39c2c27b201ebc5d00611354fa2b33e6e7091e3d3a7bfb26b1fbb0 not found: ID does not exist" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.115793 4769 scope.go:117] "RemoveContainer" containerID="46b7e6f3d2ee35bb4d2c43646e7b5a993ec4c3547e917bdc2031fe9df98a4af2" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.115998 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"46b7e6f3d2ee35bb4d2c43646e7b5a993ec4c3547e917bdc2031fe9df98a4af2\": container with ID starting with 46b7e6f3d2ee35bb4d2c43646e7b5a993ec4c3547e917bdc2031fe9df98a4af2 not found: ID does not exist" containerID="46b7e6f3d2ee35bb4d2c43646e7b5a993ec4c3547e917bdc2031fe9df98a4af2" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.116027 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"46b7e6f3d2ee35bb4d2c43646e7b5a993ec4c3547e917bdc2031fe9df98a4af2"} err="failed to get container status \"46b7e6f3d2ee35bb4d2c43646e7b5a993ec4c3547e917bdc2031fe9df98a4af2\": rpc error: code = NotFound desc = could not find container \"46b7e6f3d2ee35bb4d2c43646e7b5a993ec4c3547e917bdc2031fe9df98a4af2\": container with ID starting with 46b7e6f3d2ee35bb4d2c43646e7b5a993ec4c3547e917bdc2031fe9df98a4af2 not found: ID does not exist" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.116043 4769 scope.go:117] "RemoveContainer" containerID="8b402ba867fc3e1b8aeec82352ec3d1add5d3e9c83be0f8ac71af1060a36bf0b" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.116244 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8b402ba867fc3e1b8aeec82352ec3d1add5d3e9c83be0f8ac71af1060a36bf0b\": container with ID starting with 8b402ba867fc3e1b8aeec82352ec3d1add5d3e9c83be0f8ac71af1060a36bf0b not found: ID does not exist" containerID="8b402ba867fc3e1b8aeec82352ec3d1add5d3e9c83be0f8ac71af1060a36bf0b" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.116269 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8b402ba867fc3e1b8aeec82352ec3d1add5d3e9c83be0f8ac71af1060a36bf0b"} 
err="failed to get container status \"8b402ba867fc3e1b8aeec82352ec3d1add5d3e9c83be0f8ac71af1060a36bf0b\": rpc error: code = NotFound desc = could not find container \"8b402ba867fc3e1b8aeec82352ec3d1add5d3e9c83be0f8ac71af1060a36bf0b\": container with ID starting with 8b402ba867fc3e1b8aeec82352ec3d1add5d3e9c83be0f8ac71af1060a36bf0b not found: ID does not exist" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.116286 4769 scope.go:117] "RemoveContainer" containerID="93c8011f29f9ae3f1996f20440683ea8bb6cc88c5be0ef6bbbaacd6928db68c5" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.116485 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"93c8011f29f9ae3f1996f20440683ea8bb6cc88c5be0ef6bbbaacd6928db68c5\": container with ID starting with 93c8011f29f9ae3f1996f20440683ea8bb6cc88c5be0ef6bbbaacd6928db68c5 not found: ID does not exist" containerID="93c8011f29f9ae3f1996f20440683ea8bb6cc88c5be0ef6bbbaacd6928db68c5" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.116533 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"93c8011f29f9ae3f1996f20440683ea8bb6cc88c5be0ef6bbbaacd6928db68c5"} err="failed to get container status \"93c8011f29f9ae3f1996f20440683ea8bb6cc88c5be0ef6bbbaacd6928db68c5\": rpc error: code = NotFound desc = could not find container \"93c8011f29f9ae3f1996f20440683ea8bb6cc88c5be0ef6bbbaacd6928db68c5\": container with ID starting with 93c8011f29f9ae3f1996f20440683ea8bb6cc88c5be0ef6bbbaacd6928db68c5 not found: ID does not exist" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.116550 4769 scope.go:117] "RemoveContainer" containerID="4e075c22c49a5afeea376a661a368be4d293c9991ddde3126f346b830af8b432" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.117010 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4e075c22c49a5afeea376a661a368be4d293c9991ddde3126f346b830af8b432\": container with ID starting with 4e075c22c49a5afeea376a661a368be4d293c9991ddde3126f346b830af8b432 not found: ID does not exist" containerID="4e075c22c49a5afeea376a661a368be4d293c9991ddde3126f346b830af8b432" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.117102 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4e075c22c49a5afeea376a661a368be4d293c9991ddde3126f346b830af8b432"} err="failed to get container status \"4e075c22c49a5afeea376a661a368be4d293c9991ddde3126f346b830af8b432\": rpc error: code = NotFound desc = could not find container \"4e075c22c49a5afeea376a661a368be4d293c9991ddde3126f346b830af8b432\": container with ID starting with 4e075c22c49a5afeea376a661a368be4d293c9991ddde3126f346b830af8b432 not found: ID does not exist" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.117193 4769 scope.go:117] "RemoveContainer" containerID="e42277a545765b43f10b0536804f3468c53b03a1a33d27e1a80dd6f67e1f3a5b" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.117787 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e42277a545765b43f10b0536804f3468c53b03a1a33d27e1a80dd6f67e1f3a5b\": container with ID starting with e42277a545765b43f10b0536804f3468c53b03a1a33d27e1a80dd6f67e1f3a5b not found: ID does not exist" containerID="e42277a545765b43f10b0536804f3468c53b03a1a33d27e1a80dd6f67e1f3a5b" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.117856 4769 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e42277a545765b43f10b0536804f3468c53b03a1a33d27e1a80dd6f67e1f3a5b"} err="failed to get container status \"e42277a545765b43f10b0536804f3468c53b03a1a33d27e1a80dd6f67e1f3a5b\": rpc error: code = NotFound desc = could not find container \"e42277a545765b43f10b0536804f3468c53b03a1a33d27e1a80dd6f67e1f3a5b\": container with ID starting with e42277a545765b43f10b0536804f3468c53b03a1a33d27e1a80dd6f67e1f3a5b not found: ID does not exist" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.117879 4769 scope.go:117] "RemoveContainer" containerID="e4db49b800be05015d3c4a037e1e3cbf3907f6f78aff0523dd56386753181050" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.118220 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e4db49b800be05015d3c4a037e1e3cbf3907f6f78aff0523dd56386753181050\": container with ID starting with e4db49b800be05015d3c4a037e1e3cbf3907f6f78aff0523dd56386753181050 not found: ID does not exist" containerID="e4db49b800be05015d3c4a037e1e3cbf3907f6f78aff0523dd56386753181050" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.118247 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e4db49b800be05015d3c4a037e1e3cbf3907f6f78aff0523dd56386753181050"} err="failed to get container status \"e4db49b800be05015d3c4a037e1e3cbf3907f6f78aff0523dd56386753181050\": rpc error: code = NotFound desc = could not find container \"e4db49b800be05015d3c4a037e1e3cbf3907f6f78aff0523dd56386753181050\": container with ID starting with e4db49b800be05015d3c4a037e1e3cbf3907f6f78aff0523dd56386753181050 not found: ID does not exist" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.118320 4769 scope.go:117] "RemoveContainer" containerID="bcfb5448de1e5529813dca9b7928a45f7bd23f656a2e7b5b2d7b49d9a93b15fe" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.118741 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bcfb5448de1e5529813dca9b7928a45f7bd23f656a2e7b5b2d7b49d9a93b15fe\": container with ID starting with bcfb5448de1e5529813dca9b7928a45f7bd23f656a2e7b5b2d7b49d9a93b15fe not found: ID does not exist" containerID="bcfb5448de1e5529813dca9b7928a45f7bd23f656a2e7b5b2d7b49d9a93b15fe" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.118771 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bcfb5448de1e5529813dca9b7928a45f7bd23f656a2e7b5b2d7b49d9a93b15fe"} err="failed to get container status \"bcfb5448de1e5529813dca9b7928a45f7bd23f656a2e7b5b2d7b49d9a93b15fe\": rpc error: code = NotFound desc = could not find container \"bcfb5448de1e5529813dca9b7928a45f7bd23f656a2e7b5b2d7b49d9a93b15fe\": container with ID starting with bcfb5448de1e5529813dca9b7928a45f7bd23f656a2e7b5b2d7b49d9a93b15fe not found: ID does not exist" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.118814 4769 scope.go:117] "RemoveContainer" containerID="4ee56ae49386474b93f4a54b1463bfaa4ac6f1b6153c68ae08ea5286ac683a49" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.119054 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4ee56ae49386474b93f4a54b1463bfaa4ac6f1b6153c68ae08ea5286ac683a49\": container with ID starting with 4ee56ae49386474b93f4a54b1463bfaa4ac6f1b6153c68ae08ea5286ac683a49 not found: ID does 
not exist" containerID="4ee56ae49386474b93f4a54b1463bfaa4ac6f1b6153c68ae08ea5286ac683a49" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.119106 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ee56ae49386474b93f4a54b1463bfaa4ac6f1b6153c68ae08ea5286ac683a49"} err="failed to get container status \"4ee56ae49386474b93f4a54b1463bfaa4ac6f1b6153c68ae08ea5286ac683a49\": rpc error: code = NotFound desc = could not find container \"4ee56ae49386474b93f4a54b1463bfaa4ac6f1b6153c68ae08ea5286ac683a49\": container with ID starting with 4ee56ae49386474b93f4a54b1463bfaa4ac6f1b6153c68ae08ea5286ac683a49 not found: ID does not exist" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.119126 4769 scope.go:117] "RemoveContainer" containerID="fb8cd1b7a56a25fb1e03e6e444d2dbacf0780faf3d8442e84148d0d3903f3a0b" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.119391 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fb8cd1b7a56a25fb1e03e6e444d2dbacf0780faf3d8442e84148d0d3903f3a0b\": container with ID starting with fb8cd1b7a56a25fb1e03e6e444d2dbacf0780faf3d8442e84148d0d3903f3a0b not found: ID does not exist" containerID="fb8cd1b7a56a25fb1e03e6e444d2dbacf0780faf3d8442e84148d0d3903f3a0b" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.119447 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fb8cd1b7a56a25fb1e03e6e444d2dbacf0780faf3d8442e84148d0d3903f3a0b"} err="failed to get container status \"fb8cd1b7a56a25fb1e03e6e444d2dbacf0780faf3d8442e84148d0d3903f3a0b\": rpc error: code = NotFound desc = could not find container \"fb8cd1b7a56a25fb1e03e6e444d2dbacf0780faf3d8442e84148d0d3903f3a0b\": container with ID starting with fb8cd1b7a56a25fb1e03e6e444d2dbacf0780faf3d8442e84148d0d3903f3a0b not found: ID does not exist" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.162103 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["swift-kuttl-tests/swift-storage-0"] Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.169388 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["swift-kuttl-tests/swift-storage-0"] Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.199683 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/swift-storage-0"] Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.199929 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d" containerName="extract-utilities" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.199940 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d" containerName="extract-utilities" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.199952 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="container-replicator" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.199960 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="container-replicator" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.199969 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="swift-recon-cron" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.199975 4769 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="swift-recon-cron" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.199984 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="container-updater" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.199989 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="container-updater" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.199997 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="container-updater" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.200003 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="container-updater" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.200009 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="object-expirer" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.200016 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="object-expirer" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.200022 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="object-expirer" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.200027 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="object-expirer" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.200036 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="container-replicator" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.200042 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="container-replicator" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.200048 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="container-replicator" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.200053 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="container-replicator" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.200061 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="object-expirer" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.200066 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="object-expirer" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.200076 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="object-updater" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.200082 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="object-updater" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.200088 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="object-updater" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.200095 4769 
state_mem.go:107] "Deleted CPUSet assignment" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="object-updater" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.200102 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="rsync" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.200109 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="rsync" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.200119 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="container-server" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.200125 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="container-server" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.200136 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="object-expirer" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.200141 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="object-expirer" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.200149 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="object-expirer" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.200155 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="object-expirer" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.200161 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="container-auditor" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.200167 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="container-auditor" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.200173 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="account-replicator" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.200179 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="account-replicator" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.200187 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="object-expirer" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.200193 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="object-expirer" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.200199 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="container-replicator" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.200204 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="container-replicator" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.200213 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c2354ec-99b5-4d49-b323-8fbc638cd57d" containerName="registry-server" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 
16:57:28.200218 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c2354ec-99b5-4d49-b323-8fbc638cd57d" containerName="registry-server" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.200230 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="account-replicator" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.200236 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="account-replicator" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.200243 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="object-updater" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.200248 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="object-updater" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.200256 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d" containerName="registry-server" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.200261 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d" containerName="registry-server" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.200268 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c2354ec-99b5-4d49-b323-8fbc638cd57d" containerName="extract-content" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.200275 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c2354ec-99b5-4d49-b323-8fbc638cd57d" containerName="extract-content" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.200281 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="object-server" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.200287 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="object-server" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.200298 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="account-replicator" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.200303 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="account-replicator" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.200310 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="object-replicator" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.200315 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="object-replicator" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.200324 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="container-updater" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.200330 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="container-updater" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.200337 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="account-replicator" Jan 31 
16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.200342 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="account-replicator" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.200351 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="account-server" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.200356 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="account-server" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.200362 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="account-replicator" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.200368 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="account-replicator" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.200377 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="container-updater" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.200382 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="container-updater" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.200394 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="account-auditor" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.200399 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="account-auditor" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.200408 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="account-reaper" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.200414 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="account-reaper" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.200424 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="object-auditor" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.200429 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="object-auditor" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.200437 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c2354ec-99b5-4d49-b323-8fbc638cd57d" containerName="extract-utilities" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.200443 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c2354ec-99b5-4d49-b323-8fbc638cd57d" containerName="extract-utilities" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.200452 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d" containerName="extract-content" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.200457 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d" containerName="extract-content" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.201231 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" 
containerName="object-expirer" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.201255 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="account-replicator" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.201265 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="5c2354ec-99b5-4d49-b323-8fbc638cd57d" containerName="registry-server" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.201273 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="object-auditor" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.201310 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="container-updater" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.201320 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="container-replicator" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.201328 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="container-replicator" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.201334 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="object-expirer" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.201343 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="account-replicator" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.201350 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="object-updater" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.201357 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="object-replicator" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.201383 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="container-replicator" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.201391 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="ca6f8b8c-fbbd-4e8c-88c0-5cfce726029d" containerName="registry-server" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.201400 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="container-replicator" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.201408 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="swift-recon-cron" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.201416 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="container-updater" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.201426 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="container-auditor" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.201437 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="object-server" Jan 31 16:57:28 crc kubenswrapper[4769]: 
I0131 16:57:28.201449 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="object-expirer" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.201455 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="container-server" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.201464 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="account-replicator" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.201470 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="account-replicator" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.201476 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="container-updater" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.201486 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="object-updater" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.201508 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="rsync" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.201517 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="object-expirer" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.201524 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="container-updater" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.201531 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="account-auditor" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.201539 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="account-replicator" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.201545 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="account-replicator" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.201685 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="object-updater" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.201695 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="account-reaper" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.201703 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="account-server" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.201716 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="container-replicator" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.201828 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="container-replicator" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.201835 4769 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="container-replicator" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.201843 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="container-replicator" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.201848 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="container-replicator" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.201856 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="object-updater" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.201861 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="object-updater" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.201873 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="object-expirer" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.201880 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="object-expirer" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.201886 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="container-replicator" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.201891 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="container-replicator" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.201905 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="account-replicator" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.201910 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="account-replicator" Jan 31 16:57:28 crc kubenswrapper[4769]: E0131 16:57:28.201917 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="account-replicator" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.201922 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="account-replicator" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.202030 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="container-replicator" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.202040 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="object-expirer" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.202048 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="account-replicator" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.202057 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="object-updater" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.202069 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="object-expirer" Jan 31 16:57:28 crc kubenswrapper[4769]: 
I0131 16:57:28.202271 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="container-replicator" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.202283 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" containerName="object-expirer" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.206005 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-storage-0" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.208285 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"swift-kuttl-tests"/"swift-storage-config-data" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.211820 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-storage-0"] Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.375850 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/13aa61f9-8314-4571-afce-8c24594fa917-cache\") pod \"swift-storage-0\" (UID: \"13aa61f9-8314-4571-afce-8c24594fa917\") " pod="swift-kuttl-tests/swift-storage-0" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.375914 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rtm4v\" (UniqueName: \"kubernetes.io/projected/13aa61f9-8314-4571-afce-8c24594fa917-kube-api-access-rtm4v\") pod \"swift-storage-0\" (UID: \"13aa61f9-8314-4571-afce-8c24594fa917\") " pod="swift-kuttl-tests/swift-storage-0" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.375985 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/13aa61f9-8314-4571-afce-8c24594fa917-lock\") pod \"swift-storage-0\" (UID: \"13aa61f9-8314-4571-afce-8c24594fa917\") " pod="swift-kuttl-tests/swift-storage-0" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.376010 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/13aa61f9-8314-4571-afce-8c24594fa917-etc-swift\") pod \"swift-storage-0\" (UID: \"13aa61f9-8314-4571-afce-8c24594fa917\") " pod="swift-kuttl-tests/swift-storage-0" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.376044 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"swift-storage-0\" (UID: \"13aa61f9-8314-4571-afce-8c24594fa917\") " pod="swift-kuttl-tests/swift-storage-0" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.477603 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rtm4v\" (UniqueName: \"kubernetes.io/projected/13aa61f9-8314-4571-afce-8c24594fa917-kube-api-access-rtm4v\") pod \"swift-storage-0\" (UID: \"13aa61f9-8314-4571-afce-8c24594fa917\") " pod="swift-kuttl-tests/swift-storage-0" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.477685 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/13aa61f9-8314-4571-afce-8c24594fa917-lock\") pod \"swift-storage-0\" (UID: \"13aa61f9-8314-4571-afce-8c24594fa917\") " pod="swift-kuttl-tests/swift-storage-0" Jan 31 16:57:28 crc 
kubenswrapper[4769]: I0131 16:57:28.477746 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/13aa61f9-8314-4571-afce-8c24594fa917-etc-swift\") pod \"swift-storage-0\" (UID: \"13aa61f9-8314-4571-afce-8c24594fa917\") " pod="swift-kuttl-tests/swift-storage-0" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.477852 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"swift-storage-0\" (UID: \"13aa61f9-8314-4571-afce-8c24594fa917\") " pod="swift-kuttl-tests/swift-storage-0" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.478074 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/13aa61f9-8314-4571-afce-8c24594fa917-cache\") pod \"swift-storage-0\" (UID: \"13aa61f9-8314-4571-afce-8c24594fa917\") " pod="swift-kuttl-tests/swift-storage-0" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.478273 4769 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"swift-storage-0\" (UID: \"13aa61f9-8314-4571-afce-8c24594fa917\") device mount path \"/mnt/openstack/pv11\"" pod="swift-kuttl-tests/swift-storage-0" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.478623 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/13aa61f9-8314-4571-afce-8c24594fa917-lock\") pod \"swift-storage-0\" (UID: \"13aa61f9-8314-4571-afce-8c24594fa917\") " pod="swift-kuttl-tests/swift-storage-0" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.478752 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/13aa61f9-8314-4571-afce-8c24594fa917-cache\") pod \"swift-storage-0\" (UID: \"13aa61f9-8314-4571-afce-8c24594fa917\") " pod="swift-kuttl-tests/swift-storage-0" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.486256 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/13aa61f9-8314-4571-afce-8c24594fa917-etc-swift\") pod \"swift-storage-0\" (UID: \"13aa61f9-8314-4571-afce-8c24594fa917\") " pod="swift-kuttl-tests/swift-storage-0" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.505004 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rtm4v\" (UniqueName: \"kubernetes.io/projected/13aa61f9-8314-4571-afce-8c24594fa917-kube-api-access-rtm4v\") pod \"swift-storage-0\" (UID: \"13aa61f9-8314-4571-afce-8c24594fa917\") " pod="swift-kuttl-tests/swift-storage-0" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.508844 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"swift-storage-0\" (UID: \"13aa61f9-8314-4571-afce-8c24594fa917\") " pod="swift-kuttl-tests/swift-storage-0" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.519624 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-storage-0" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.730051 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc" path="/var/lib/kubelet/pods/2d4b411e-0d38-4e04-a5d5-dfda91ec6ebc/volumes" Jan 31 16:57:28 crc kubenswrapper[4769]: I0131 16:57:28.826072 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-storage-0"] Jan 31 16:57:28 crc kubenswrapper[4769]: W0131 16:57:28.834019 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod13aa61f9_8314_4571_afce_8c24594fa917.slice/crio-01aa56971a0a8e0abfd994eb3281383d3628c9ea1097ebfdba8c8243a46415f8 WatchSource:0}: Error finding container 01aa56971a0a8e0abfd994eb3281383d3628c9ea1097ebfdba8c8243a46415f8: Status 404 returned error can't find the container with id 01aa56971a0a8e0abfd994eb3281383d3628c9ea1097ebfdba8c8243a46415f8 Jan 31 16:57:29 crc kubenswrapper[4769]: I0131 16:57:29.845089 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="1c1d3fed82e1bd16e362e784ef3fe7a25c111b1cc8bd0eb60f7bdafe958eaf47" exitCode=1 Jan 31 16:57:29 crc kubenswrapper[4769]: I0131 16:57:29.845139 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerStarted","Data":"6be11375cd139308d7b8ac72eb6a60c60438d1943cc25438b7a626dc4e59814b"} Jan 31 16:57:29 crc kubenswrapper[4769]: I0131 16:57:29.845166 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerStarted","Data":"3c0565ee0d6f5bcd57ec3eb03dbfde105920b77d0fa7314c462c1e298c40c9f6"} Jan 31 16:57:29 crc kubenswrapper[4769]: I0131 16:57:29.845178 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerStarted","Data":"aadfaf48aca760e5ce2285c2dfc87350252dee43f47d9a615dd6e7e255dc1771"} Jan 31 16:57:29 crc kubenswrapper[4769]: I0131 16:57:29.845190 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"1c1d3fed82e1bd16e362e784ef3fe7a25c111b1cc8bd0eb60f7bdafe958eaf47"} Jan 31 16:57:29 crc kubenswrapper[4769]: I0131 16:57:29.845202 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerStarted","Data":"7edc9f5d3f8c3832d39e3b7ef47e3d19aea6274ad9308a9663d916788dac6f25"} Jan 31 16:57:29 crc kubenswrapper[4769]: I0131 16:57:29.845213 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerStarted","Data":"01aa56971a0a8e0abfd994eb3281383d3628c9ea1097ebfdba8c8243a46415f8"} Jan 31 16:57:30 crc kubenswrapper[4769]: I0131 16:57:30.858257 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="2a9561ddfdacd41e3f04fa0b363d3fec9cdf53741a138edcf2a99e38dc4437b4" exitCode=1 Jan 31 16:57:30 crc kubenswrapper[4769]: I0131 16:57:30.858326 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" 
event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerStarted","Data":"bf4aaec2d3a92f3432a99ed853481d712b0e0d231574b7d135464ebbab0e5230"} Jan 31 16:57:30 crc kubenswrapper[4769]: I0131 16:57:30.858663 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerStarted","Data":"f7cc1be0fa9920304d7aae71ad24a69c108737575216d738f71ebd7385087250"} Jan 31 16:57:30 crc kubenswrapper[4769]: I0131 16:57:30.858684 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerStarted","Data":"a964787dc89a0cf7c0a77e74d5e679588ccfe1a4fa84ac9c0b369adb5d127e8f"} Jan 31 16:57:30 crc kubenswrapper[4769]: I0131 16:57:30.858696 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerStarted","Data":"a0b2ba6d51ec40d5d594dcf3fccf60ddfb8791afe72e3b8c322cf69d789ddbfd"} Jan 31 16:57:30 crc kubenswrapper[4769]: I0131 16:57:30.858706 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerStarted","Data":"fdb980b61ae08e734a09a70061099c29e5b18ad652139ebc9f45fa0cecef607b"} Jan 31 16:57:30 crc kubenswrapper[4769]: I0131 16:57:30.858714 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"2a9561ddfdacd41e3f04fa0b363d3fec9cdf53741a138edcf2a99e38dc4437b4"} Jan 31 16:57:31 crc kubenswrapper[4769]: I0131 16:57:31.875076 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="0a86e82f17f6cf7c00aae8487a584ed1341bddf4d61075932873097b5deac0c9" exitCode=1 Jan 31 16:57:31 crc kubenswrapper[4769]: I0131 16:57:31.875175 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerStarted","Data":"54aa29a59cb08f4733dedfb8715fc52d68581ff639fe56da4cf929591dcfc586"} Jan 31 16:57:31 crc kubenswrapper[4769]: I0131 16:57:31.875443 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerStarted","Data":"abfc432d628ce5f086a74703aca0b401791213002b89a50355dc5e95169fc5c3"} Jan 31 16:57:31 crc kubenswrapper[4769]: I0131 16:57:31.875468 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerStarted","Data":"45e5b2ec83560c73ecb783421cdffef1dbac039213b03adc85d0f40bdbd7b660"} Jan 31 16:57:31 crc kubenswrapper[4769]: I0131 16:57:31.875489 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"0a86e82f17f6cf7c00aae8487a584ed1341bddf4d61075932873097b5deac0c9"} Jan 31 16:57:31 crc kubenswrapper[4769]: I0131 16:57:31.875571 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerStarted","Data":"b2ba4adff2ae1ce1a2aae1094c4aaa7d2e260cea91f7d8580dd53fb0f834f1bc"} Jan 31 16:57:31 crc kubenswrapper[4769]: I0131 
16:57:31.876627 4769 scope.go:117] "RemoveContainer" containerID="1c1d3fed82e1bd16e362e784ef3fe7a25c111b1cc8bd0eb60f7bdafe958eaf47" Jan 31 16:57:31 crc kubenswrapper[4769]: I0131 16:57:31.876774 4769 scope.go:117] "RemoveContainer" containerID="2a9561ddfdacd41e3f04fa0b363d3fec9cdf53741a138edcf2a99e38dc4437b4" Jan 31 16:57:31 crc kubenswrapper[4769]: I0131 16:57:31.876939 4769 scope.go:117] "RemoveContainer" containerID="0a86e82f17f6cf7c00aae8487a584ed1341bddf4d61075932873097b5deac0c9" Jan 31 16:57:32 crc kubenswrapper[4769]: I0131 16:57:32.895700 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="54aa29a59cb08f4733dedfb8715fc52d68581ff639fe56da4cf929591dcfc586" exitCode=1 Jan 31 16:57:32 crc kubenswrapper[4769]: I0131 16:57:32.895740 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="eceafeffd65628e218d08bc7d8717c002b9b294b4bc3c91a5c2af94ae0d944f3" exitCode=1 Jan 31 16:57:32 crc kubenswrapper[4769]: I0131 16:57:32.895751 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="3944f2ffa664f6a0a62ca43c65145eddc34aafd1d27b08a76b4b10ef2519d2e8" exitCode=1 Jan 31 16:57:32 crc kubenswrapper[4769]: I0131 16:57:32.895775 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"54aa29a59cb08f4733dedfb8715fc52d68581ff639fe56da4cf929591dcfc586"} Jan 31 16:57:32 crc kubenswrapper[4769]: I0131 16:57:32.895807 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerStarted","Data":"f66625de793fc0edf8b039238510df72ea372a52897b2db85ca80838cdeea6a2"} Jan 31 16:57:32 crc kubenswrapper[4769]: I0131 16:57:32.895823 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"eceafeffd65628e218d08bc7d8717c002b9b294b4bc3c91a5c2af94ae0d944f3"} Jan 31 16:57:32 crc kubenswrapper[4769]: I0131 16:57:32.895880 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"3944f2ffa664f6a0a62ca43c65145eddc34aafd1d27b08a76b4b10ef2519d2e8"} Jan 31 16:57:32 crc kubenswrapper[4769]: I0131 16:57:32.895900 4769 scope.go:117] "RemoveContainer" containerID="2a9561ddfdacd41e3f04fa0b363d3fec9cdf53741a138edcf2a99e38dc4437b4" Jan 31 16:57:32 crc kubenswrapper[4769]: I0131 16:57:32.897027 4769 scope.go:117] "RemoveContainer" containerID="3944f2ffa664f6a0a62ca43c65145eddc34aafd1d27b08a76b4b10ef2519d2e8" Jan 31 16:57:32 crc kubenswrapper[4769]: I0131 16:57:32.897282 4769 scope.go:117] "RemoveContainer" containerID="eceafeffd65628e218d08bc7d8717c002b9b294b4bc3c91a5c2af94ae0d944f3" Jan 31 16:57:32 crc kubenswrapper[4769]: I0131 16:57:32.897615 4769 scope.go:117] "RemoveContainer" containerID="54aa29a59cb08f4733dedfb8715fc52d68581ff639fe56da4cf929591dcfc586" Jan 31 16:57:32 crc kubenswrapper[4769]: I0131 16:57:32.935586 4769 scope.go:117] "RemoveContainer" containerID="1c1d3fed82e1bd16e362e784ef3fe7a25c111b1cc8bd0eb60f7bdafe958eaf47" Jan 31 16:57:33 crc kubenswrapper[4769]: E0131 16:57:33.089971 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for 
\"account-replicator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 16:57:33 crc kubenswrapper[4769]: I0131 16:57:33.919668 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="f66625de793fc0edf8b039238510df72ea372a52897b2db85ca80838cdeea6a2" exitCode=1 Jan 31 16:57:33 crc kubenswrapper[4769]: I0131 16:57:33.919706 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="faa2763df53db21806339603c389572b44963559579d697fe1a8dee9b6c3534c" exitCode=1 Jan 31 16:57:33 crc kubenswrapper[4769]: I0131 16:57:33.919728 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"f66625de793fc0edf8b039238510df72ea372a52897b2db85ca80838cdeea6a2"} Jan 31 16:57:33 crc kubenswrapper[4769]: I0131 16:57:33.919762 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"faa2763df53db21806339603c389572b44963559579d697fe1a8dee9b6c3534c"} Jan 31 16:57:33 crc kubenswrapper[4769]: I0131 16:57:33.919784 4769 scope.go:117] "RemoveContainer" containerID="0a86e82f17f6cf7c00aae8487a584ed1341bddf4d61075932873097b5deac0c9" Jan 31 16:57:33 crc kubenswrapper[4769]: I0131 16:57:33.920530 4769 scope.go:117] "RemoveContainer" containerID="3944f2ffa664f6a0a62ca43c65145eddc34aafd1d27b08a76b4b10ef2519d2e8" Jan 31 16:57:33 crc kubenswrapper[4769]: I0131 16:57:33.920601 4769 scope.go:117] "RemoveContainer" containerID="eceafeffd65628e218d08bc7d8717c002b9b294b4bc3c91a5c2af94ae0d944f3" Jan 31 16:57:33 crc kubenswrapper[4769]: I0131 16:57:33.920713 4769 scope.go:117] "RemoveContainer" containerID="f66625de793fc0edf8b039238510df72ea372a52897b2db85ca80838cdeea6a2" Jan 31 16:57:33 crc kubenswrapper[4769]: I0131 16:57:33.920766 4769 scope.go:117] "RemoveContainer" containerID="faa2763df53db21806339603c389572b44963559579d697fe1a8dee9b6c3534c" Jan 31 16:57:33 crc kubenswrapper[4769]: E0131 16:57:33.921108 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 10s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 10s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" 
pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 16:57:33 crc kubenswrapper[4769]: I0131 16:57:33.982290 4769 scope.go:117] "RemoveContainer" containerID="54aa29a59cb08f4733dedfb8715fc52d68581ff639fe56da4cf929591dcfc586" Jan 31 16:57:34 crc kubenswrapper[4769]: I0131 16:57:34.708348 4769 scope.go:117] "RemoveContainer" containerID="5953b98da67a838c445c5de169d2507557e859d1573f7aa06c047f960972389b" Jan 31 16:57:34 crc kubenswrapper[4769]: E0131 16:57:34.708794 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 16:57:34 crc kubenswrapper[4769]: I0131 16:57:34.936073 4769 scope.go:117] "RemoveContainer" containerID="3944f2ffa664f6a0a62ca43c65145eddc34aafd1d27b08a76b4b10ef2519d2e8" Jan 31 16:57:34 crc kubenswrapper[4769]: I0131 16:57:34.936532 4769 scope.go:117] "RemoveContainer" containerID="eceafeffd65628e218d08bc7d8717c002b9b294b4bc3c91a5c2af94ae0d944f3" Jan 31 16:57:34 crc kubenswrapper[4769]: I0131 16:57:34.936760 4769 scope.go:117] "RemoveContainer" containerID="f66625de793fc0edf8b039238510df72ea372a52897b2db85ca80838cdeea6a2" Jan 31 16:57:34 crc kubenswrapper[4769]: I0131 16:57:34.936831 4769 scope.go:117] "RemoveContainer" containerID="faa2763df53db21806339603c389572b44963559579d697fe1a8dee9b6c3534c" Jan 31 16:57:34 crc kubenswrapper[4769]: E0131 16:57:34.937416 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 10s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 10s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 16:57:41 crc kubenswrapper[4769]: I0131 16:57:41.708431 4769 scope.go:117] "RemoveContainer" containerID="d265c21335904ec04c8f26185f4e269eaf14f174a03a26c659d9c48710dc4dc7" Jan 31 16:57:41 crc kubenswrapper[4769]: I0131 16:57:41.708824 4769 scope.go:117] "RemoveContainer" containerID="dd3b22c098cb5ff9324f75488cb8c100817cdb9683a4061e67522bb86b98e242" Jan 31 16:57:41 crc kubenswrapper[4769]: E0131 16:57:41.897563 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 
16:57:41 crc kubenswrapper[4769]: I0131 16:57:41.993700 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerStarted","Data":"f1f37e41e997dc24eb862caacca07d96d60555cbd8eb9630c5979470491c1bcc"} Jan 31 16:57:41 crc kubenswrapper[4769]: I0131 16:57:41.994612 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:57:41 crc kubenswrapper[4769]: I0131 16:57:41.995609 4769 scope.go:117] "RemoveContainer" containerID="d265c21335904ec04c8f26185f4e269eaf14f174a03a26c659d9c48710dc4dc7" Jan 31 16:57:41 crc kubenswrapper[4769]: E0131 16:57:41.996296 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:57:43 crc kubenswrapper[4769]: I0131 16:57:43.000126 4769 generic.go:334] "Generic (PLEG): container finished" podID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerID="f1f37e41e997dc24eb862caacca07d96d60555cbd8eb9630c5979470491c1bcc" exitCode=1 Jan 31 16:57:43 crc kubenswrapper[4769]: I0131 16:57:43.000166 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerDied","Data":"f1f37e41e997dc24eb862caacca07d96d60555cbd8eb9630c5979470491c1bcc"} Jan 31 16:57:43 crc kubenswrapper[4769]: I0131 16:57:43.000195 4769 scope.go:117] "RemoveContainer" containerID="dd3b22c098cb5ff9324f75488cb8c100817cdb9683a4061e67522bb86b98e242" Jan 31 16:57:43 crc kubenswrapper[4769]: I0131 16:57:43.000681 4769 scope.go:117] "RemoveContainer" containerID="d265c21335904ec04c8f26185f4e269eaf14f174a03a26c659d9c48710dc4dc7" Jan 31 16:57:43 crc kubenswrapper[4769]: I0131 16:57:43.000702 4769 scope.go:117] "RemoveContainer" containerID="f1f37e41e997dc24eb862caacca07d96d60555cbd8eb9630c5979470491c1bcc" Jan 31 16:57:43 crc kubenswrapper[4769]: E0131 16:57:43.001019 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:57:44 crc kubenswrapper[4769]: I0131 16:57:44.010260 4769 scope.go:117] "RemoveContainer" containerID="d265c21335904ec04c8f26185f4e269eaf14f174a03a26c659d9c48710dc4dc7" Jan 31 16:57:44 crc kubenswrapper[4769]: I0131 16:57:44.010303 4769 scope.go:117] "RemoveContainer" containerID="f1f37e41e997dc24eb862caacca07d96d60555cbd8eb9630c5979470491c1bcc" Jan 31 16:57:44 crc kubenswrapper[4769]: E0131 16:57:44.010707 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd 
pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:57:44 crc kubenswrapper[4769]: I0131 16:57:44.644897 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:57:45 crc kubenswrapper[4769]: I0131 16:57:45.020006 4769 scope.go:117] "RemoveContainer" containerID="d265c21335904ec04c8f26185f4e269eaf14f174a03a26c659d9c48710dc4dc7" Jan 31 16:57:45 crc kubenswrapper[4769]: I0131 16:57:45.020049 4769 scope.go:117] "RemoveContainer" containerID="f1f37e41e997dc24eb862caacca07d96d60555cbd8eb9630c5979470491c1bcc" Jan 31 16:57:45 crc kubenswrapper[4769]: E0131 16:57:45.020437 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:57:49 crc kubenswrapper[4769]: I0131 16:57:49.709213 4769 scope.go:117] "RemoveContainer" containerID="3944f2ffa664f6a0a62ca43c65145eddc34aafd1d27b08a76b4b10ef2519d2e8" Jan 31 16:57:49 crc kubenswrapper[4769]: I0131 16:57:49.709884 4769 scope.go:117] "RemoveContainer" containerID="eceafeffd65628e218d08bc7d8717c002b9b294b4bc3c91a5c2af94ae0d944f3" Jan 31 16:57:49 crc kubenswrapper[4769]: I0131 16:57:49.709938 4769 scope.go:117] "RemoveContainer" containerID="5953b98da67a838c445c5de169d2507557e859d1573f7aa06c047f960972389b" Jan 31 16:57:49 crc kubenswrapper[4769]: I0131 16:57:49.710067 4769 scope.go:117] "RemoveContainer" containerID="f66625de793fc0edf8b039238510df72ea372a52897b2db85ca80838cdeea6a2" Jan 31 16:57:49 crc kubenswrapper[4769]: I0131 16:57:49.710135 4769 scope.go:117] "RemoveContainer" containerID="faa2763df53db21806339603c389572b44963559579d697fe1a8dee9b6c3534c" Jan 31 16:57:49 crc kubenswrapper[4769]: E0131 16:57:49.710398 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 16:57:50 crc kubenswrapper[4769]: I0131 16:57:50.072291 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerStarted","Data":"7788795209f4daf0f9ff3c8ad6330be7e3863f55c6757840440c7e34d51f4e38"} Jan 31 16:57:51 crc kubenswrapper[4769]: I0131 16:57:51.092639 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" 
containerID="7788795209f4daf0f9ff3c8ad6330be7e3863f55c6757840440c7e34d51f4e38" exitCode=1 Jan 31 16:57:51 crc kubenswrapper[4769]: I0131 16:57:51.092704 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="0bf63a78e0057013048800b19d15d8493bba57656c98424c19eda8ae1f20b2fa" exitCode=1 Jan 31 16:57:51 crc kubenswrapper[4769]: I0131 16:57:51.092714 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="21899648e7596958b1b65d11ce0e38b397d66eb37b4b57ed12b6235eaddf60f7" exitCode=1 Jan 31 16:57:51 crc kubenswrapper[4769]: I0131 16:57:51.092710 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"7788795209f4daf0f9ff3c8ad6330be7e3863f55c6757840440c7e34d51f4e38"} Jan 31 16:57:51 crc kubenswrapper[4769]: I0131 16:57:51.092752 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"0bf63a78e0057013048800b19d15d8493bba57656c98424c19eda8ae1f20b2fa"} Jan 31 16:57:51 crc kubenswrapper[4769]: I0131 16:57:51.092767 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"21899648e7596958b1b65d11ce0e38b397d66eb37b4b57ed12b6235eaddf60f7"} Jan 31 16:57:51 crc kubenswrapper[4769]: I0131 16:57:51.092723 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="42abe82453750a4e287c7812dd79df31bf5b1c77c41cb24c20948d29244739c2" exitCode=1 Jan 31 16:57:51 crc kubenswrapper[4769]: I0131 16:57:51.092782 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"42abe82453750a4e287c7812dd79df31bf5b1c77c41cb24c20948d29244739c2"} Jan 31 16:57:51 crc kubenswrapper[4769]: I0131 16:57:51.092796 4769 scope.go:117] "RemoveContainer" containerID="3944f2ffa664f6a0a62ca43c65145eddc34aafd1d27b08a76b4b10ef2519d2e8" Jan 31 16:57:51 crc kubenswrapper[4769]: I0131 16:57:51.093938 4769 scope.go:117] "RemoveContainer" containerID="7788795209f4daf0f9ff3c8ad6330be7e3863f55c6757840440c7e34d51f4e38" Jan 31 16:57:51 crc kubenswrapper[4769]: I0131 16:57:51.094087 4769 scope.go:117] "RemoveContainer" containerID="42abe82453750a4e287c7812dd79df31bf5b1c77c41cb24c20948d29244739c2" Jan 31 16:57:51 crc kubenswrapper[4769]: I0131 16:57:51.094536 4769 scope.go:117] "RemoveContainer" containerID="21899648e7596958b1b65d11ce0e38b397d66eb37b4b57ed12b6235eaddf60f7" Jan 31 16:57:51 crc kubenswrapper[4769]: I0131 16:57:51.094630 4769 scope.go:117] "RemoveContainer" containerID="0bf63a78e0057013048800b19d15d8493bba57656c98424c19eda8ae1f20b2fa" Jan 31 16:57:51 crc kubenswrapper[4769]: E0131 16:57:51.095171 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", 
failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 20s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 20s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 16:57:51 crc kubenswrapper[4769]: I0131 16:57:51.175263 4769 scope.go:117] "RemoveContainer" containerID="faa2763df53db21806339603c389572b44963559579d697fe1a8dee9b6c3534c" Jan 31 16:57:51 crc kubenswrapper[4769]: I0131 16:57:51.231556 4769 scope.go:117] "RemoveContainer" containerID="f66625de793fc0edf8b039238510df72ea372a52897b2db85ca80838cdeea6a2" Jan 31 16:57:51 crc kubenswrapper[4769]: I0131 16:57:51.289861 4769 scope.go:117] "RemoveContainer" containerID="eceafeffd65628e218d08bc7d8717c002b9b294b4bc3c91a5c2af94ae0d944f3" Jan 31 16:57:52 crc kubenswrapper[4769]: I0131 16:57:52.113347 4769 scope.go:117] "RemoveContainer" containerID="7788795209f4daf0f9ff3c8ad6330be7e3863f55c6757840440c7e34d51f4e38" Jan 31 16:57:52 crc kubenswrapper[4769]: I0131 16:57:52.114044 4769 scope.go:117] "RemoveContainer" containerID="42abe82453750a4e287c7812dd79df31bf5b1c77c41cb24c20948d29244739c2" Jan 31 16:57:52 crc kubenswrapper[4769]: I0131 16:57:52.114285 4769 scope.go:117] "RemoveContainer" containerID="21899648e7596958b1b65d11ce0e38b397d66eb37b4b57ed12b6235eaddf60f7" Jan 31 16:57:52 crc kubenswrapper[4769]: I0131 16:57:52.114352 4769 scope.go:117] "RemoveContainer" containerID="0bf63a78e0057013048800b19d15d8493bba57656c98424c19eda8ae1f20b2fa" Jan 31 16:57:52 crc kubenswrapper[4769]: E0131 16:57:52.114832 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 20s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 20s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 16:57:57 crc kubenswrapper[4769]: I0131 16:57:57.707942 4769 scope.go:117] "RemoveContainer" containerID="d265c21335904ec04c8f26185f4e269eaf14f174a03a26c659d9c48710dc4dc7" Jan 31 16:57:57 crc kubenswrapper[4769]: I0131 16:57:57.709464 4769 scope.go:117] "RemoveContainer" containerID="f1f37e41e997dc24eb862caacca07d96d60555cbd8eb9630c5979470491c1bcc" Jan 31 16:57:57 crc kubenswrapper[4769]: E0131 16:57:57.710116 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd 
pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:58:00 crc kubenswrapper[4769]: I0131 16:58:00.708489 4769 scope.go:117] "RemoveContainer" containerID="5953b98da67a838c445c5de169d2507557e859d1573f7aa06c047f960972389b" Jan 31 16:58:00 crc kubenswrapper[4769]: E0131 16:58:00.708830 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 16:58:06 crc kubenswrapper[4769]: I0131 16:58:06.708645 4769 scope.go:117] "RemoveContainer" containerID="7788795209f4daf0f9ff3c8ad6330be7e3863f55c6757840440c7e34d51f4e38" Jan 31 16:58:06 crc kubenswrapper[4769]: I0131 16:58:06.709113 4769 scope.go:117] "RemoveContainer" containerID="42abe82453750a4e287c7812dd79df31bf5b1c77c41cb24c20948d29244739c2" Jan 31 16:58:06 crc kubenswrapper[4769]: I0131 16:58:06.709322 4769 scope.go:117] "RemoveContainer" containerID="21899648e7596958b1b65d11ce0e38b397d66eb37b4b57ed12b6235eaddf60f7" Jan 31 16:58:06 crc kubenswrapper[4769]: I0131 16:58:06.709376 4769 scope.go:117] "RemoveContainer" containerID="0bf63a78e0057013048800b19d15d8493bba57656c98424c19eda8ae1f20b2fa" Jan 31 16:58:06 crc kubenswrapper[4769]: E0131 16:58:06.709753 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 20s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 20s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 16:58:09 crc kubenswrapper[4769]: I0131 16:58:09.708370 4769 scope.go:117] "RemoveContainer" containerID="d265c21335904ec04c8f26185f4e269eaf14f174a03a26c659d9c48710dc4dc7" Jan 31 16:58:09 crc kubenswrapper[4769]: I0131 16:58:09.708406 4769 scope.go:117] "RemoveContainer" containerID="f1f37e41e997dc24eb862caacca07d96d60555cbd8eb9630c5979470491c1bcc" Jan 31 16:58:09 crc kubenswrapper[4769]: E0131 16:58:09.708767 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd 
pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:58:14 crc kubenswrapper[4769]: I0131 16:58:14.707808 4769 scope.go:117] "RemoveContainer" containerID="5953b98da67a838c445c5de169d2507557e859d1573f7aa06c047f960972389b" Jan 31 16:58:14 crc kubenswrapper[4769]: E0131 16:58:14.708107 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 16:58:17 crc kubenswrapper[4769]: I0131 16:58:17.709283 4769 scope.go:117] "RemoveContainer" containerID="7788795209f4daf0f9ff3c8ad6330be7e3863f55c6757840440c7e34d51f4e38" Jan 31 16:58:17 crc kubenswrapper[4769]: I0131 16:58:17.709733 4769 scope.go:117] "RemoveContainer" containerID="42abe82453750a4e287c7812dd79df31bf5b1c77c41cb24c20948d29244739c2" Jan 31 16:58:17 crc kubenswrapper[4769]: I0131 16:58:17.709914 4769 scope.go:117] "RemoveContainer" containerID="21899648e7596958b1b65d11ce0e38b397d66eb37b4b57ed12b6235eaddf60f7" Jan 31 16:58:17 crc kubenswrapper[4769]: I0131 16:58:17.709979 4769 scope.go:117] "RemoveContainer" containerID="0bf63a78e0057013048800b19d15d8493bba57656c98424c19eda8ae1f20b2fa" Jan 31 16:58:18 crc kubenswrapper[4769]: I0131 16:58:18.341793 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="4a185923538bbde35b0da0b7ad7f8ed71c78cb32f9409f84fa6f2ca74a160fc3" exitCode=1 Jan 31 16:58:18 crc kubenswrapper[4769]: I0131 16:58:18.341860 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerStarted","Data":"3211bf518ae147f03de7eac48a0be00d6716d0aa50689eafaa6eb07d9d81085a"} Jan 31 16:58:18 crc kubenswrapper[4769]: I0131 16:58:18.341908 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerStarted","Data":"60e47aa2a2d612a05472cbae7ee28cf270baabe5212de3d44d079a5644de48db"} Jan 31 16:58:18 crc kubenswrapper[4769]: I0131 16:58:18.341921 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"4a185923538bbde35b0da0b7ad7f8ed71c78cb32f9409f84fa6f2ca74a160fc3"} Jan 31 16:58:18 crc kubenswrapper[4769]: I0131 16:58:18.341950 4769 scope.go:117] "RemoveContainer" containerID="7788795209f4daf0f9ff3c8ad6330be7e3863f55c6757840440c7e34d51f4e38" Jan 31 16:58:19 crc kubenswrapper[4769]: I0131 16:58:19.391451 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="3211bf518ae147f03de7eac48a0be00d6716d0aa50689eafaa6eb07d9d81085a" exitCode=1 Jan 31 16:58:19 crc kubenswrapper[4769]: I0131 16:58:19.391798 4769 generic.go:334] "Generic (PLEG): container finished" 
podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="60e47aa2a2d612a05472cbae7ee28cf270baabe5212de3d44d079a5644de48db" exitCode=1 Jan 31 16:58:19 crc kubenswrapper[4769]: I0131 16:58:19.391813 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="22c74bfe9d7a140b4666c530bbfa4d1f35ef9c29e8015c568555451b23433e78" exitCode=1 Jan 31 16:58:19 crc kubenswrapper[4769]: I0131 16:58:19.391623 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"3211bf518ae147f03de7eac48a0be00d6716d0aa50689eafaa6eb07d9d81085a"} Jan 31 16:58:19 crc kubenswrapper[4769]: I0131 16:58:19.391859 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"60e47aa2a2d612a05472cbae7ee28cf270baabe5212de3d44d079a5644de48db"} Jan 31 16:58:19 crc kubenswrapper[4769]: I0131 16:58:19.391882 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"22c74bfe9d7a140b4666c530bbfa4d1f35ef9c29e8015c568555451b23433e78"} Jan 31 16:58:19 crc kubenswrapper[4769]: I0131 16:58:19.391906 4769 scope.go:117] "RemoveContainer" containerID="21899648e7596958b1b65d11ce0e38b397d66eb37b4b57ed12b6235eaddf60f7" Jan 31 16:58:19 crc kubenswrapper[4769]: I0131 16:58:19.392246 4769 scope.go:117] "RemoveContainer" containerID="4a185923538bbde35b0da0b7ad7f8ed71c78cb32f9409f84fa6f2ca74a160fc3" Jan 31 16:58:19 crc kubenswrapper[4769]: I0131 16:58:19.392337 4769 scope.go:117] "RemoveContainer" containerID="60e47aa2a2d612a05472cbae7ee28cf270baabe5212de3d44d079a5644de48db" Jan 31 16:58:19 crc kubenswrapper[4769]: I0131 16:58:19.392469 4769 scope.go:117] "RemoveContainer" containerID="3211bf518ae147f03de7eac48a0be00d6716d0aa50689eafaa6eb07d9d81085a" Jan 31 16:58:19 crc kubenswrapper[4769]: I0131 16:58:19.392546 4769 scope.go:117] "RemoveContainer" containerID="22c74bfe9d7a140b4666c530bbfa4d1f35ef9c29e8015c568555451b23433e78" Jan 31 16:58:19 crc kubenswrapper[4769]: E0131 16:58:19.392864 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 40s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 40s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 16:58:19 crc kubenswrapper[4769]: I0131 16:58:19.443230 4769 scope.go:117] "RemoveContainer" containerID="42abe82453750a4e287c7812dd79df31bf5b1c77c41cb24c20948d29244739c2" Jan 31 16:58:19 crc kubenswrapper[4769]: I0131 16:58:19.485485 4769 scope.go:117] 
"RemoveContainer" containerID="0bf63a78e0057013048800b19d15d8493bba57656c98424c19eda8ae1f20b2fa" Jan 31 16:58:20 crc kubenswrapper[4769]: I0131 16:58:20.411081 4769 scope.go:117] "RemoveContainer" containerID="4a185923538bbde35b0da0b7ad7f8ed71c78cb32f9409f84fa6f2ca74a160fc3" Jan 31 16:58:20 crc kubenswrapper[4769]: I0131 16:58:20.411144 4769 scope.go:117] "RemoveContainer" containerID="60e47aa2a2d612a05472cbae7ee28cf270baabe5212de3d44d079a5644de48db" Jan 31 16:58:20 crc kubenswrapper[4769]: I0131 16:58:20.411227 4769 scope.go:117] "RemoveContainer" containerID="3211bf518ae147f03de7eac48a0be00d6716d0aa50689eafaa6eb07d9d81085a" Jan 31 16:58:20 crc kubenswrapper[4769]: I0131 16:58:20.411263 4769 scope.go:117] "RemoveContainer" containerID="22c74bfe9d7a140b4666c530bbfa4d1f35ef9c29e8015c568555451b23433e78" Jan 31 16:58:20 crc kubenswrapper[4769]: E0131 16:58:20.411520 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 40s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 40s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 16:58:23 crc kubenswrapper[4769]: I0131 16:58:23.707989 4769 scope.go:117] "RemoveContainer" containerID="d265c21335904ec04c8f26185f4e269eaf14f174a03a26c659d9c48710dc4dc7" Jan 31 16:58:23 crc kubenswrapper[4769]: I0131 16:58:23.708487 4769 scope.go:117] "RemoveContainer" containerID="f1f37e41e997dc24eb862caacca07d96d60555cbd8eb9630c5979470491c1bcc" Jan 31 16:58:23 crc kubenswrapper[4769]: E0131 16:58:23.709532 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:58:25 crc kubenswrapper[4769]: I0131 16:58:25.708421 4769 scope.go:117] "RemoveContainer" containerID="5953b98da67a838c445c5de169d2507557e859d1573f7aa06c047f960972389b" Jan 31 16:58:25 crc kubenswrapper[4769]: E0131 16:58:25.709238 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" 
podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 16:58:35 crc kubenswrapper[4769]: I0131 16:58:35.709084 4769 scope.go:117] "RemoveContainer" containerID="d265c21335904ec04c8f26185f4e269eaf14f174a03a26c659d9c48710dc4dc7" Jan 31 16:58:35 crc kubenswrapper[4769]: I0131 16:58:35.709778 4769 scope.go:117] "RemoveContainer" containerID="f1f37e41e997dc24eb862caacca07d96d60555cbd8eb9630c5979470491c1bcc" Jan 31 16:58:35 crc kubenswrapper[4769]: I0131 16:58:35.709877 4769 scope.go:117] "RemoveContainer" containerID="4a185923538bbde35b0da0b7ad7f8ed71c78cb32f9409f84fa6f2ca74a160fc3" Jan 31 16:58:35 crc kubenswrapper[4769]: I0131 16:58:35.709951 4769 scope.go:117] "RemoveContainer" containerID="60e47aa2a2d612a05472cbae7ee28cf270baabe5212de3d44d079a5644de48db" Jan 31 16:58:35 crc kubenswrapper[4769]: I0131 16:58:35.710045 4769 scope.go:117] "RemoveContainer" containerID="3211bf518ae147f03de7eac48a0be00d6716d0aa50689eafaa6eb07d9d81085a" Jan 31 16:58:35 crc kubenswrapper[4769]: I0131 16:58:35.710083 4769 scope.go:117] "RemoveContainer" containerID="22c74bfe9d7a140b4666c530bbfa4d1f35ef9c29e8015c568555451b23433e78" Jan 31 16:58:35 crc kubenswrapper[4769]: E0131 16:58:35.710242 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:58:35 crc kubenswrapper[4769]: E0131 16:58:35.710365 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 40s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 40s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 16:58:40 crc kubenswrapper[4769]: I0131 16:58:40.707810 4769 scope.go:117] "RemoveContainer" containerID="5953b98da67a838c445c5de169d2507557e859d1573f7aa06c047f960972389b" Jan 31 16:58:40 crc kubenswrapper[4769]: E0131 16:58:40.709130 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 
16:58:46 crc kubenswrapper[4769]: I0131 16:58:46.708380 4769 scope.go:117] "RemoveContainer" containerID="d265c21335904ec04c8f26185f4e269eaf14f174a03a26c659d9c48710dc4dc7" Jan 31 16:58:46 crc kubenswrapper[4769]: I0131 16:58:46.708893 4769 scope.go:117] "RemoveContainer" containerID="f1f37e41e997dc24eb862caacca07d96d60555cbd8eb9630c5979470491c1bcc" Jan 31 16:58:46 crc kubenswrapper[4769]: E0131 16:58:46.709138 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:58:48 crc kubenswrapper[4769]: I0131 16:58:48.709087 4769 scope.go:117] "RemoveContainer" containerID="4a185923538bbde35b0da0b7ad7f8ed71c78cb32f9409f84fa6f2ca74a160fc3" Jan 31 16:58:48 crc kubenswrapper[4769]: I0131 16:58:48.709617 4769 scope.go:117] "RemoveContainer" containerID="60e47aa2a2d612a05472cbae7ee28cf270baabe5212de3d44d079a5644de48db" Jan 31 16:58:48 crc kubenswrapper[4769]: I0131 16:58:48.709801 4769 scope.go:117] "RemoveContainer" containerID="3211bf518ae147f03de7eac48a0be00d6716d0aa50689eafaa6eb07d9d81085a" Jan 31 16:58:48 crc kubenswrapper[4769]: I0131 16:58:48.709868 4769 scope.go:117] "RemoveContainer" containerID="22c74bfe9d7a140b4666c530bbfa4d1f35ef9c29e8015c568555451b23433e78" Jan 31 16:58:48 crc kubenswrapper[4769]: E0131 16:58:48.710323 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 40s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 40s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 16:58:53 crc kubenswrapper[4769]: I0131 16:58:53.708807 4769 scope.go:117] "RemoveContainer" containerID="5953b98da67a838c445c5de169d2507557e859d1573f7aa06c047f960972389b" Jan 31 16:58:53 crc kubenswrapper[4769]: E0131 16:58:53.709384 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 16:58:58 crc kubenswrapper[4769]: I0131 16:58:58.709097 
4769 scope.go:117] "RemoveContainer" containerID="d265c21335904ec04c8f26185f4e269eaf14f174a03a26c659d9c48710dc4dc7" Jan 31 16:58:58 crc kubenswrapper[4769]: I0131 16:58:58.709148 4769 scope.go:117] "RemoveContainer" containerID="f1f37e41e997dc24eb862caacca07d96d60555cbd8eb9630c5979470491c1bcc" Jan 31 16:58:58 crc kubenswrapper[4769]: E0131 16:58:58.709563 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:59:02 crc kubenswrapper[4769]: I0131 16:59:02.716848 4769 scope.go:117] "RemoveContainer" containerID="4a185923538bbde35b0da0b7ad7f8ed71c78cb32f9409f84fa6f2ca74a160fc3" Jan 31 16:59:02 crc kubenswrapper[4769]: I0131 16:59:02.717346 4769 scope.go:117] "RemoveContainer" containerID="60e47aa2a2d612a05472cbae7ee28cf270baabe5212de3d44d079a5644de48db" Jan 31 16:59:02 crc kubenswrapper[4769]: I0131 16:59:02.717561 4769 scope.go:117] "RemoveContainer" containerID="3211bf518ae147f03de7eac48a0be00d6716d0aa50689eafaa6eb07d9d81085a" Jan 31 16:59:02 crc kubenswrapper[4769]: I0131 16:59:02.717627 4769 scope.go:117] "RemoveContainer" containerID="22c74bfe9d7a140b4666c530bbfa4d1f35ef9c29e8015c568555451b23433e78" Jan 31 16:59:03 crc kubenswrapper[4769]: I0131 16:59:03.822360 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="2219c6addafd23e501bf6e1a777bd861123de973c9f43c63ee391384be6d7862" exitCode=1 Jan 31 16:59:03 crc kubenswrapper[4769]: I0131 16:59:03.822759 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="11bfe466448632be9f721cc3dce43ff9165322a0234d7b9e73930e16149a925a" exitCode=1 Jan 31 16:59:03 crc kubenswrapper[4769]: I0131 16:59:03.822430 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerStarted","Data":"8707e97ccea91e915243fcf8abe6cf20acf92ad826357e257c0dfcc9e7f78794"} Jan 31 16:59:03 crc kubenswrapper[4769]: I0131 16:59:03.822808 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"2219c6addafd23e501bf6e1a777bd861123de973c9f43c63ee391384be6d7862"} Jan 31 16:59:03 crc kubenswrapper[4769]: I0131 16:59:03.822828 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"11bfe466448632be9f721cc3dce43ff9165322a0234d7b9e73930e16149a925a"} Jan 31 16:59:03 crc kubenswrapper[4769]: I0131 16:59:03.822842 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"56dbbc2c6cef8f7b323a242e8dd5e9ba07f61df13d525232f6325968322d2908"} Jan 31 16:59:03 crc kubenswrapper[4769]: I0131 16:59:03.822776 4769 generic.go:334] "Generic (PLEG): container finished" 
podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="56dbbc2c6cef8f7b323a242e8dd5e9ba07f61df13d525232f6325968322d2908" exitCode=1 Jan 31 16:59:03 crc kubenswrapper[4769]: I0131 16:59:03.822862 4769 scope.go:117] "RemoveContainer" containerID="3211bf518ae147f03de7eac48a0be00d6716d0aa50689eafaa6eb07d9d81085a" Jan 31 16:59:03 crc kubenswrapper[4769]: I0131 16:59:03.823568 4769 scope.go:117] "RemoveContainer" containerID="56dbbc2c6cef8f7b323a242e8dd5e9ba07f61df13d525232f6325968322d2908" Jan 31 16:59:03 crc kubenswrapper[4769]: I0131 16:59:03.823655 4769 scope.go:117] "RemoveContainer" containerID="11bfe466448632be9f721cc3dce43ff9165322a0234d7b9e73930e16149a925a" Jan 31 16:59:03 crc kubenswrapper[4769]: I0131 16:59:03.823767 4769 scope.go:117] "RemoveContainer" containerID="2219c6addafd23e501bf6e1a777bd861123de973c9f43c63ee391384be6d7862" Jan 31 16:59:03 crc kubenswrapper[4769]: E0131 16:59:03.824099 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 16:59:03 crc kubenswrapper[4769]: I0131 16:59:03.881281 4769 scope.go:117] "RemoveContainer" containerID="60e47aa2a2d612a05472cbae7ee28cf270baabe5212de3d44d079a5644de48db" Jan 31 16:59:03 crc kubenswrapper[4769]: I0131 16:59:03.928053 4769 scope.go:117] "RemoveContainer" containerID="4a185923538bbde35b0da0b7ad7f8ed71c78cb32f9409f84fa6f2ca74a160fc3" Jan 31 16:59:04 crc kubenswrapper[4769]: I0131 16:59:04.841051 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="8707e97ccea91e915243fcf8abe6cf20acf92ad826357e257c0dfcc9e7f78794" exitCode=1 Jan 31 16:59:04 crc kubenswrapper[4769]: I0131 16:59:04.841105 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"8707e97ccea91e915243fcf8abe6cf20acf92ad826357e257c0dfcc9e7f78794"} Jan 31 16:59:04 crc kubenswrapper[4769]: I0131 16:59:04.841172 4769 scope.go:117] "RemoveContainer" containerID="22c74bfe9d7a140b4666c530bbfa4d1f35ef9c29e8015c568555451b23433e78" Jan 31 16:59:04 crc kubenswrapper[4769]: I0131 16:59:04.842334 4769 scope.go:117] "RemoveContainer" containerID="56dbbc2c6cef8f7b323a242e8dd5e9ba07f61df13d525232f6325968322d2908" Jan 31 16:59:04 crc kubenswrapper[4769]: I0131 16:59:04.842469 4769 scope.go:117] "RemoveContainer" containerID="11bfe466448632be9f721cc3dce43ff9165322a0234d7b9e73930e16149a925a" Jan 31 16:59:04 crc kubenswrapper[4769]: I0131 16:59:04.842700 4769 scope.go:117] "RemoveContainer" containerID="2219c6addafd23e501bf6e1a777bd861123de973c9f43c63ee391384be6d7862" Jan 31 16:59:04 crc kubenswrapper[4769]: I0131 16:59:04.842771 4769 scope.go:117] "RemoveContainer" containerID="8707e97ccea91e915243fcf8abe6cf20acf92ad826357e257c0dfcc9e7f78794" Jan 31 
16:59:04 crc kubenswrapper[4769]: E0131 16:59:04.843264 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 16:59:05 crc kubenswrapper[4769]: I0131 16:59:05.708470 4769 scope.go:117] "RemoveContainer" containerID="5953b98da67a838c445c5de169d2507557e859d1573f7aa06c047f960972389b" Jan 31 16:59:05 crc kubenswrapper[4769]: E0131 16:59:05.708867 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 16:59:09 crc kubenswrapper[4769]: I0131 16:59:09.708348 4769 scope.go:117] "RemoveContainer" containerID="d265c21335904ec04c8f26185f4e269eaf14f174a03a26c659d9c48710dc4dc7" Jan 31 16:59:09 crc kubenswrapper[4769]: I0131 16:59:09.708865 4769 scope.go:117] "RemoveContainer" containerID="f1f37e41e997dc24eb862caacca07d96d60555cbd8eb9630c5979470491c1bcc" Jan 31 16:59:09 crc kubenswrapper[4769]: E0131 16:59:09.900768 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:59:10 crc kubenswrapper[4769]: I0131 16:59:10.911572 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerStarted","Data":"310be50240179fa2209271fd79b25e6680a68035d868c6e2e90359569a480b43"} Jan 31 16:59:10 crc kubenswrapper[4769]: I0131 16:59:10.912156 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:59:10 crc kubenswrapper[4769]: I0131 16:59:10.912467 4769 scope.go:117] "RemoveContainer" containerID="f1f37e41e997dc24eb862caacca07d96d60555cbd8eb9630c5979470491c1bcc" Jan 31 16:59:10 crc kubenswrapper[4769]: E0131 16:59:10.912734 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server 
pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:59:11 crc kubenswrapper[4769]: I0131 16:59:11.918597 4769 scope.go:117] "RemoveContainer" containerID="f1f37e41e997dc24eb862caacca07d96d60555cbd8eb9630c5979470491c1bcc" Jan 31 16:59:11 crc kubenswrapper[4769]: E0131 16:59:11.918770 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:59:14 crc kubenswrapper[4769]: I0131 16:59:14.172946 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices\") pod \"swift-ring-rebalance-2sjs2\" (UID: \"54c0116b-a027-4f11-8b6b-aa00778f1acb\") " pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 16:59:14 crc kubenswrapper[4769]: E0131 16:59:14.173123 4769 configmap.go:193] Couldn't get configMap swift-kuttl-tests/swift-ring-config-data: configmap "swift-ring-config-data" not found Jan 31 16:59:14 crc kubenswrapper[4769]: E0131 16:59:14.173471 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices podName:54c0116b-a027-4f11-8b6b-aa00778f1acb nodeName:}" failed. No retries permitted until 2026-01-31 17:01:16.17344989 +0000 UTC m=+1924.247618569 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices") pod "swift-ring-rebalance-2sjs2" (UID: "54c0116b-a027-4f11-8b6b-aa00778f1acb") : configmap "swift-ring-config-data" not found Jan 31 16:59:14 crc kubenswrapper[4769]: I0131 16:59:14.648804 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:59:15 crc kubenswrapper[4769]: E0131 16:59:15.553055 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ring-data-devices], unattached volumes=[], failed to process volumes=[]: context deadline exceeded" pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" podUID="54c0116b-a027-4f11-8b6b-aa00778f1acb" Jan 31 16:59:16 crc kubenswrapper[4769]: I0131 16:59:16.024502 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 16:59:16 crc kubenswrapper[4769]: I0131 16:59:16.646686 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:59:17 crc kubenswrapper[4769]: I0131 16:59:17.648036 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:59:18 crc kubenswrapper[4769]: I0131 16:59:18.708214 4769 scope.go:117] "RemoveContainer" containerID="56dbbc2c6cef8f7b323a242e8dd5e9ba07f61df13d525232f6325968322d2908" Jan 31 16:59:18 crc kubenswrapper[4769]: I0131 16:59:18.708509 4769 scope.go:117] "RemoveContainer" containerID="11bfe466448632be9f721cc3dce43ff9165322a0234d7b9e73930e16149a925a" Jan 31 16:59:18 crc kubenswrapper[4769]: I0131 16:59:18.708610 4769 scope.go:117] "RemoveContainer" containerID="2219c6addafd23e501bf6e1a777bd861123de973c9f43c63ee391384be6d7862" Jan 31 16:59:18 crc kubenswrapper[4769]: I0131 16:59:18.708642 4769 scope.go:117] "RemoveContainer" containerID="8707e97ccea91e915243fcf8abe6cf20acf92ad826357e257c0dfcc9e7f78794" Jan 31 16:59:18 crc kubenswrapper[4769]: E0131 16:59:18.708896 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 16:59:20 crc kubenswrapper[4769]: I0131 16:59:20.651792 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:59:20 crc kubenswrapper[4769]: I0131 16:59:20.651908 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:59:20 crc kubenswrapper[4769]: I0131 16:59:20.653044 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="proxy-httpd" containerStatusID={"Type":"cri-o","ID":"310be50240179fa2209271fd79b25e6680a68035d868c6e2e90359569a480b43"} pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" containerMessage="Container proxy-httpd failed liveness probe, will be restarted" Jan 31 16:59:20 crc kubenswrapper[4769]: I0131 16:59:20.653090 4769 scope.go:117] "RemoveContainer" 
containerID="f1f37e41e997dc24eb862caacca07d96d60555cbd8eb9630c5979470491c1bcc" Jan 31 16:59:20 crc kubenswrapper[4769]: I0131 16:59:20.653142 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" containerID="cri-o://310be50240179fa2209271fd79b25e6680a68035d868c6e2e90359569a480b43" gracePeriod=30 Jan 31 16:59:20 crc kubenswrapper[4769]: I0131 16:59:20.654293 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:59:20 crc kubenswrapper[4769]: I0131 16:59:20.708661 4769 scope.go:117] "RemoveContainer" containerID="5953b98da67a838c445c5de169d2507557e859d1573f7aa06c047f960972389b" Jan 31 16:59:20 crc kubenswrapper[4769]: E0131 16:59:20.709187 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 16:59:20 crc kubenswrapper[4769]: E0131 16:59:20.853464 4769 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfb764692_fbb8_4fb4_860c_2cd0e0cfd452.slice/crio-conmon-310be50240179fa2209271fd79b25e6680a68035d868c6e2e90359569a480b43.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfb764692_fbb8_4fb4_860c_2cd0e0cfd452.slice/crio-310be50240179fa2209271fd79b25e6680a68035d868c6e2e90359569a480b43.scope\": RecentStats: unable to find data in memory cache]" Jan 31 16:59:20 crc kubenswrapper[4769]: E0131 16:59:20.969868 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:59:21 crc kubenswrapper[4769]: I0131 16:59:21.066876 4769 generic.go:334] "Generic (PLEG): container finished" podID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerID="310be50240179fa2209271fd79b25e6680a68035d868c6e2e90359569a480b43" exitCode=0 Jan 31 16:59:21 crc kubenswrapper[4769]: I0131 16:59:21.066924 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerDied","Data":"310be50240179fa2209271fd79b25e6680a68035d868c6e2e90359569a480b43"} Jan 31 16:59:21 crc kubenswrapper[4769]: I0131 16:59:21.066955 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerStarted","Data":"1ac6c3fca74fd84fdeae5ef2487d972493932dc0800386987645fa8647ffc868"} Jan 31 16:59:21 crc kubenswrapper[4769]: I0131 16:59:21.066974 4769 scope.go:117] "RemoveContainer" 
containerID="d265c21335904ec04c8f26185f4e269eaf14f174a03a26c659d9c48710dc4dc7" Jan 31 16:59:21 crc kubenswrapper[4769]: I0131 16:59:21.067194 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:59:21 crc kubenswrapper[4769]: I0131 16:59:21.067696 4769 scope.go:117] "RemoveContainer" containerID="f1f37e41e997dc24eb862caacca07d96d60555cbd8eb9630c5979470491c1bcc" Jan 31 16:59:21 crc kubenswrapper[4769]: E0131 16:59:21.067969 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:59:22 crc kubenswrapper[4769]: I0131 16:59:22.083271 4769 scope.go:117] "RemoveContainer" containerID="f1f37e41e997dc24eb862caacca07d96d60555cbd8eb9630c5979470491c1bcc" Jan 31 16:59:22 crc kubenswrapper[4769]: E0131 16:59:22.083594 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:59:26 crc kubenswrapper[4769]: I0131 16:59:26.647620 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:59:26 crc kubenswrapper[4769]: I0131 16:59:26.647836 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:59:29 crc kubenswrapper[4769]: I0131 16:59:29.648020 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:59:30 crc kubenswrapper[4769]: I0131 16:59:30.708912 4769 scope.go:117] "RemoveContainer" containerID="56dbbc2c6cef8f7b323a242e8dd5e9ba07f61df13d525232f6325968322d2908" Jan 31 16:59:30 crc kubenswrapper[4769]: I0131 16:59:30.709235 4769 scope.go:117] "RemoveContainer" containerID="11bfe466448632be9f721cc3dce43ff9165322a0234d7b9e73930e16149a925a" Jan 31 16:59:30 crc kubenswrapper[4769]: I0131 16:59:30.709354 4769 scope.go:117] "RemoveContainer" containerID="2219c6addafd23e501bf6e1a777bd861123de973c9f43c63ee391384be6d7862" Jan 31 16:59:30 crc kubenswrapper[4769]: I0131 16:59:30.709396 4769 scope.go:117] "RemoveContainer" containerID="8707e97ccea91e915243fcf8abe6cf20acf92ad826357e257c0dfcc9e7f78794" Jan 31 16:59:30 crc kubenswrapper[4769]: E0131 16:59:30.709805 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" 
for \"container-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 16:59:31 crc kubenswrapper[4769]: I0131 16:59:31.647147 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:59:32 crc kubenswrapper[4769]: I0131 16:59:32.647975 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:59:32 crc kubenswrapper[4769]: I0131 16:59:32.648075 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 16:59:32 crc kubenswrapper[4769]: I0131 16:59:32.648816 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="proxy-httpd" containerStatusID={"Type":"cri-o","ID":"1ac6c3fca74fd84fdeae5ef2487d972493932dc0800386987645fa8647ffc868"} pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" containerMessage="Container proxy-httpd failed liveness probe, will be restarted" Jan 31 16:59:32 crc kubenswrapper[4769]: I0131 16:59:32.648840 4769 scope.go:117] "RemoveContainer" containerID="f1f37e41e997dc24eb862caacca07d96d60555cbd8eb9630c5979470491c1bcc" Jan 31 16:59:32 crc kubenswrapper[4769]: I0131 16:59:32.648880 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" containerID="cri-o://1ac6c3fca74fd84fdeae5ef2487d972493932dc0800386987645fa8647ffc868" gracePeriod=30 Jan 31 16:59:32 crc kubenswrapper[4769]: I0131 16:59:32.653243 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 16:59:32 crc kubenswrapper[4769]: E0131 16:59:32.780742 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:59:33 crc kubenswrapper[4769]: I0131 16:59:33.193188 4769 
generic.go:334] "Generic (PLEG): container finished" podID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerID="1ac6c3fca74fd84fdeae5ef2487d972493932dc0800386987645fa8647ffc868" exitCode=0 Jan 31 16:59:33 crc kubenswrapper[4769]: I0131 16:59:33.193267 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerDied","Data":"1ac6c3fca74fd84fdeae5ef2487d972493932dc0800386987645fa8647ffc868"} Jan 31 16:59:33 crc kubenswrapper[4769]: I0131 16:59:33.193515 4769 scope.go:117] "RemoveContainer" containerID="310be50240179fa2209271fd79b25e6680a68035d868c6e2e90359569a480b43" Jan 31 16:59:33 crc kubenswrapper[4769]: I0131 16:59:33.194455 4769 scope.go:117] "RemoveContainer" containerID="1ac6c3fca74fd84fdeae5ef2487d972493932dc0800386987645fa8647ffc868" Jan 31 16:59:33 crc kubenswrapper[4769]: I0131 16:59:33.194548 4769 scope.go:117] "RemoveContainer" containerID="f1f37e41e997dc24eb862caacca07d96d60555cbd8eb9630c5979470491c1bcc" Jan 31 16:59:33 crc kubenswrapper[4769]: E0131 16:59:33.195000 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:59:35 crc kubenswrapper[4769]: I0131 16:59:35.708167 4769 scope.go:117] "RemoveContainer" containerID="5953b98da67a838c445c5de169d2507557e859d1573f7aa06c047f960972389b" Jan 31 16:59:35 crc kubenswrapper[4769]: E0131 16:59:35.708780 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 16:59:38 crc kubenswrapper[4769]: I0131 16:59:38.243815 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="b2ba4adff2ae1ce1a2aae1094c4aaa7d2e260cea91f7d8580dd53fb0f834f1bc" exitCode=1 Jan 31 16:59:38 crc kubenswrapper[4769]: I0131 16:59:38.243875 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"b2ba4adff2ae1ce1a2aae1094c4aaa7d2e260cea91f7d8580dd53fb0f834f1bc"} Jan 31 16:59:38 crc kubenswrapper[4769]: I0131 16:59:38.246138 4769 scope.go:117] "RemoveContainer" containerID="56dbbc2c6cef8f7b323a242e8dd5e9ba07f61df13d525232f6325968322d2908" Jan 31 16:59:38 crc kubenswrapper[4769]: I0131 16:59:38.246311 4769 scope.go:117] "RemoveContainer" containerID="11bfe466448632be9f721cc3dce43ff9165322a0234d7b9e73930e16149a925a" Jan 31 16:59:38 crc kubenswrapper[4769]: I0131 16:59:38.246545 4769 scope.go:117] "RemoveContainer" containerID="b2ba4adff2ae1ce1a2aae1094c4aaa7d2e260cea91f7d8580dd53fb0f834f1bc" Jan 31 16:59:38 crc kubenswrapper[4769]: I0131 16:59:38.246691 4769 
scope.go:117] "RemoveContainer" containerID="2219c6addafd23e501bf6e1a777bd861123de973c9f43c63ee391384be6d7862" Jan 31 16:59:38 crc kubenswrapper[4769]: I0131 16:59:38.246820 4769 scope.go:117] "RemoveContainer" containerID="8707e97ccea91e915243fcf8abe6cf20acf92ad826357e257c0dfcc9e7f78794" Jan 31 16:59:38 crc kubenswrapper[4769]: E0131 16:59:38.429465 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 16:59:39 crc kubenswrapper[4769]: I0131 16:59:39.263625 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerStarted","Data":"84c2f4c49c1c9b12fa8714fd5a724335cb0fbeea29abb91954d762942d2a821b"} Jan 31 16:59:39 crc kubenswrapper[4769]: I0131 16:59:39.264247 4769 scope.go:117] "RemoveContainer" containerID="56dbbc2c6cef8f7b323a242e8dd5e9ba07f61df13d525232f6325968322d2908" Jan 31 16:59:39 crc kubenswrapper[4769]: I0131 16:59:39.264306 4769 scope.go:117] "RemoveContainer" containerID="11bfe466448632be9f721cc3dce43ff9165322a0234d7b9e73930e16149a925a" Jan 31 16:59:39 crc kubenswrapper[4769]: I0131 16:59:39.264402 4769 scope.go:117] "RemoveContainer" containerID="2219c6addafd23e501bf6e1a777bd861123de973c9f43c63ee391384be6d7862" Jan 31 16:59:39 crc kubenswrapper[4769]: I0131 16:59:39.264440 4769 scope.go:117] "RemoveContainer" containerID="8707e97ccea91e915243fcf8abe6cf20acf92ad826357e257c0dfcc9e7f78794" Jan 31 16:59:39 crc kubenswrapper[4769]: E0131 16:59:39.264695 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 16:59:45 crc kubenswrapper[4769]: I0131 
16:59:45.708054 4769 scope.go:117] "RemoveContainer" containerID="1ac6c3fca74fd84fdeae5ef2487d972493932dc0800386987645fa8647ffc868" Jan 31 16:59:45 crc kubenswrapper[4769]: I0131 16:59:45.708662 4769 scope.go:117] "RemoveContainer" containerID="f1f37e41e997dc24eb862caacca07d96d60555cbd8eb9630c5979470491c1bcc" Jan 31 16:59:45 crc kubenswrapper[4769]: E0131 16:59:45.709105 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 16:59:48 crc kubenswrapper[4769]: I0131 16:59:48.708300 4769 scope.go:117] "RemoveContainer" containerID="5953b98da67a838c445c5de169d2507557e859d1573f7aa06c047f960972389b" Jan 31 16:59:48 crc kubenswrapper[4769]: E0131 16:59:48.709292 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 16:59:51 crc kubenswrapper[4769]: I0131 16:59:51.708535 4769 scope.go:117] "RemoveContainer" containerID="56dbbc2c6cef8f7b323a242e8dd5e9ba07f61df13d525232f6325968322d2908" Jan 31 16:59:51 crc kubenswrapper[4769]: I0131 16:59:51.708878 4769 scope.go:117] "RemoveContainer" containerID="11bfe466448632be9f721cc3dce43ff9165322a0234d7b9e73930e16149a925a" Jan 31 16:59:51 crc kubenswrapper[4769]: I0131 16:59:51.708987 4769 scope.go:117] "RemoveContainer" containerID="2219c6addafd23e501bf6e1a777bd861123de973c9f43c63ee391384be6d7862" Jan 31 16:59:51 crc kubenswrapper[4769]: I0131 16:59:51.709029 4769 scope.go:117] "RemoveContainer" containerID="8707e97ccea91e915243fcf8abe6cf20acf92ad826357e257c0dfcc9e7f78794" Jan 31 16:59:51 crc kubenswrapper[4769]: E0131 16:59:51.709383 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 16:59:57 crc kubenswrapper[4769]: I0131 16:59:57.708008 4769 scope.go:117] 
"RemoveContainer" containerID="1ac6c3fca74fd84fdeae5ef2487d972493932dc0800386987645fa8647ffc868" Jan 31 16:59:57 crc kubenswrapper[4769]: I0131 16:59:57.708324 4769 scope.go:117] "RemoveContainer" containerID="f1f37e41e997dc24eb862caacca07d96d60555cbd8eb9630c5979470491c1bcc" Jan 31 16:59:57 crc kubenswrapper[4769]: E0131 16:59:57.708664 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:00:00 crc kubenswrapper[4769]: I0131 17:00:00.135068 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29497980-bbvbj"] Jan 31 17:00:00 crc kubenswrapper[4769]: I0131 17:00:00.136405 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29497980-bbvbj" Jan 31 17:00:00 crc kubenswrapper[4769]: I0131 17:00:00.138751 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 31 17:00:00 crc kubenswrapper[4769]: I0131 17:00:00.139517 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 31 17:00:00 crc kubenswrapper[4769]: I0131 17:00:00.150991 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29497980-bbvbj"] Jan 31 17:00:00 crc kubenswrapper[4769]: I0131 17:00:00.269558 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6fmqn\" (UniqueName: \"kubernetes.io/projected/42c87dea-0d5e-4db3-b5bc-8585f184f1f6-kube-api-access-6fmqn\") pod \"collect-profiles-29497980-bbvbj\" (UID: \"42c87dea-0d5e-4db3-b5bc-8585f184f1f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29497980-bbvbj" Jan 31 17:00:00 crc kubenswrapper[4769]: I0131 17:00:00.269646 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/42c87dea-0d5e-4db3-b5bc-8585f184f1f6-config-volume\") pod \"collect-profiles-29497980-bbvbj\" (UID: \"42c87dea-0d5e-4db3-b5bc-8585f184f1f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29497980-bbvbj" Jan 31 17:00:00 crc kubenswrapper[4769]: I0131 17:00:00.269826 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/42c87dea-0d5e-4db3-b5bc-8585f184f1f6-secret-volume\") pod \"collect-profiles-29497980-bbvbj\" (UID: \"42c87dea-0d5e-4db3-b5bc-8585f184f1f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29497980-bbvbj" Jan 31 17:00:00 crc kubenswrapper[4769]: I0131 17:00:00.371618 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/42c87dea-0d5e-4db3-b5bc-8585f184f1f6-secret-volume\") pod 
\"collect-profiles-29497980-bbvbj\" (UID: \"42c87dea-0d5e-4db3-b5bc-8585f184f1f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29497980-bbvbj" Jan 31 17:00:00 crc kubenswrapper[4769]: I0131 17:00:00.371697 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6fmqn\" (UniqueName: \"kubernetes.io/projected/42c87dea-0d5e-4db3-b5bc-8585f184f1f6-kube-api-access-6fmqn\") pod \"collect-profiles-29497980-bbvbj\" (UID: \"42c87dea-0d5e-4db3-b5bc-8585f184f1f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29497980-bbvbj" Jan 31 17:00:00 crc kubenswrapper[4769]: I0131 17:00:00.371756 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/42c87dea-0d5e-4db3-b5bc-8585f184f1f6-config-volume\") pod \"collect-profiles-29497980-bbvbj\" (UID: \"42c87dea-0d5e-4db3-b5bc-8585f184f1f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29497980-bbvbj" Jan 31 17:00:00 crc kubenswrapper[4769]: I0131 17:00:00.372614 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/42c87dea-0d5e-4db3-b5bc-8585f184f1f6-config-volume\") pod \"collect-profiles-29497980-bbvbj\" (UID: \"42c87dea-0d5e-4db3-b5bc-8585f184f1f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29497980-bbvbj" Jan 31 17:00:00 crc kubenswrapper[4769]: I0131 17:00:00.388461 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/42c87dea-0d5e-4db3-b5bc-8585f184f1f6-secret-volume\") pod \"collect-profiles-29497980-bbvbj\" (UID: \"42c87dea-0d5e-4db3-b5bc-8585f184f1f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29497980-bbvbj" Jan 31 17:00:00 crc kubenswrapper[4769]: I0131 17:00:00.397094 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6fmqn\" (UniqueName: \"kubernetes.io/projected/42c87dea-0d5e-4db3-b5bc-8585f184f1f6-kube-api-access-6fmqn\") pod \"collect-profiles-29497980-bbvbj\" (UID: \"42c87dea-0d5e-4db3-b5bc-8585f184f1f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29497980-bbvbj" Jan 31 17:00:00 crc kubenswrapper[4769]: I0131 17:00:00.454075 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29497980-bbvbj" Jan 31 17:00:00 crc kubenswrapper[4769]: I0131 17:00:00.919599 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29497980-bbvbj"] Jan 31 17:00:01 crc kubenswrapper[4769]: I0131 17:00:01.459467 4769 generic.go:334] "Generic (PLEG): container finished" podID="42c87dea-0d5e-4db3-b5bc-8585f184f1f6" containerID="4f81385bff0e5e6127fd793617b6bef113dec5a9cca119d9d112f7c55462e4ca" exitCode=0 Jan 31 17:00:01 crc kubenswrapper[4769]: I0131 17:00:01.459532 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29497980-bbvbj" event={"ID":"42c87dea-0d5e-4db3-b5bc-8585f184f1f6","Type":"ContainerDied","Data":"4f81385bff0e5e6127fd793617b6bef113dec5a9cca119d9d112f7c55462e4ca"} Jan 31 17:00:01 crc kubenswrapper[4769]: I0131 17:00:01.459755 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29497980-bbvbj" event={"ID":"42c87dea-0d5e-4db3-b5bc-8585f184f1f6","Type":"ContainerStarted","Data":"b705c2d8feb07d63c834a1196828b6e215ee926f18ba271aa31ed2fa6138e384"} Jan 31 17:00:02 crc kubenswrapper[4769]: I0131 17:00:02.715262 4769 scope.go:117] "RemoveContainer" containerID="5953b98da67a838c445c5de169d2507557e859d1573f7aa06c047f960972389b" Jan 31 17:00:02 crc kubenswrapper[4769]: E0131 17:00:02.715682 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:00:02 crc kubenswrapper[4769]: I0131 17:00:02.809095 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29497980-bbvbj" Jan 31 17:00:02 crc kubenswrapper[4769]: I0131 17:00:02.928104 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6fmqn\" (UniqueName: \"kubernetes.io/projected/42c87dea-0d5e-4db3-b5bc-8585f184f1f6-kube-api-access-6fmqn\") pod \"42c87dea-0d5e-4db3-b5bc-8585f184f1f6\" (UID: \"42c87dea-0d5e-4db3-b5bc-8585f184f1f6\") " Jan 31 17:00:02 crc kubenswrapper[4769]: I0131 17:00:02.928208 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/42c87dea-0d5e-4db3-b5bc-8585f184f1f6-config-volume\") pod \"42c87dea-0d5e-4db3-b5bc-8585f184f1f6\" (UID: \"42c87dea-0d5e-4db3-b5bc-8585f184f1f6\") " Jan 31 17:00:02 crc kubenswrapper[4769]: I0131 17:00:02.928243 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/42c87dea-0d5e-4db3-b5bc-8585f184f1f6-secret-volume\") pod \"42c87dea-0d5e-4db3-b5bc-8585f184f1f6\" (UID: \"42c87dea-0d5e-4db3-b5bc-8585f184f1f6\") " Jan 31 17:00:02 crc kubenswrapper[4769]: I0131 17:00:02.928824 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/42c87dea-0d5e-4db3-b5bc-8585f184f1f6-config-volume" (OuterVolumeSpecName: "config-volume") pod "42c87dea-0d5e-4db3-b5bc-8585f184f1f6" (UID: "42c87dea-0d5e-4db3-b5bc-8585f184f1f6"). 
InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 17:00:02 crc kubenswrapper[4769]: I0131 17:00:02.933545 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42c87dea-0d5e-4db3-b5bc-8585f184f1f6-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "42c87dea-0d5e-4db3-b5bc-8585f184f1f6" (UID: "42c87dea-0d5e-4db3-b5bc-8585f184f1f6"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 17:00:02 crc kubenswrapper[4769]: I0131 17:00:02.937630 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/42c87dea-0d5e-4db3-b5bc-8585f184f1f6-kube-api-access-6fmqn" (OuterVolumeSpecName: "kube-api-access-6fmqn") pod "42c87dea-0d5e-4db3-b5bc-8585f184f1f6" (UID: "42c87dea-0d5e-4db3-b5bc-8585f184f1f6"). InnerVolumeSpecName "kube-api-access-6fmqn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 17:00:03 crc kubenswrapper[4769]: I0131 17:00:03.029946 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6fmqn\" (UniqueName: \"kubernetes.io/projected/42c87dea-0d5e-4db3-b5bc-8585f184f1f6-kube-api-access-6fmqn\") on node \"crc\" DevicePath \"\"" Jan 31 17:00:03 crc kubenswrapper[4769]: I0131 17:00:03.029981 4769 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/42c87dea-0d5e-4db3-b5bc-8585f184f1f6-config-volume\") on node \"crc\" DevicePath \"\"" Jan 31 17:00:03 crc kubenswrapper[4769]: I0131 17:00:03.029995 4769 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/42c87dea-0d5e-4db3-b5bc-8585f184f1f6-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 31 17:00:03 crc kubenswrapper[4769]: I0131 17:00:03.478839 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29497980-bbvbj" event={"ID":"42c87dea-0d5e-4db3-b5bc-8585f184f1f6","Type":"ContainerDied","Data":"b705c2d8feb07d63c834a1196828b6e215ee926f18ba271aa31ed2fa6138e384"} Jan 31 17:00:03 crc kubenswrapper[4769]: I0131 17:00:03.478897 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b705c2d8feb07d63c834a1196828b6e215ee926f18ba271aa31ed2fa6138e384" Jan 31 17:00:03 crc kubenswrapper[4769]: I0131 17:00:03.478973 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29497980-bbvbj" Jan 31 17:00:05 crc kubenswrapper[4769]: I0131 17:00:05.709389 4769 scope.go:117] "RemoveContainer" containerID="56dbbc2c6cef8f7b323a242e8dd5e9ba07f61df13d525232f6325968322d2908" Jan 31 17:00:05 crc kubenswrapper[4769]: I0131 17:00:05.709915 4769 scope.go:117] "RemoveContainer" containerID="11bfe466448632be9f721cc3dce43ff9165322a0234d7b9e73930e16149a925a" Jan 31 17:00:05 crc kubenswrapper[4769]: I0131 17:00:05.710097 4769 scope.go:117] "RemoveContainer" containerID="2219c6addafd23e501bf6e1a777bd861123de973c9f43c63ee391384be6d7862" Jan 31 17:00:05 crc kubenswrapper[4769]: I0131 17:00:05.710167 4769 scope.go:117] "RemoveContainer" containerID="8707e97ccea91e915243fcf8abe6cf20acf92ad826357e257c0dfcc9e7f78794" Jan 31 17:00:05 crc kubenswrapper[4769]: E0131 17:00:05.710685 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:00:09 crc kubenswrapper[4769]: I0131 17:00:09.708666 4769 scope.go:117] "RemoveContainer" containerID="1ac6c3fca74fd84fdeae5ef2487d972493932dc0800386987645fa8647ffc868" Jan 31 17:00:09 crc kubenswrapper[4769]: I0131 17:00:09.709143 4769 scope.go:117] "RemoveContainer" containerID="f1f37e41e997dc24eb862caacca07d96d60555cbd8eb9630c5979470491c1bcc" Jan 31 17:00:09 crc kubenswrapper[4769]: E0131 17:00:09.709421 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:00:13 crc kubenswrapper[4769]: I0131 17:00:13.708539 4769 scope.go:117] "RemoveContainer" containerID="5953b98da67a838c445c5de169d2507557e859d1573f7aa06c047f960972389b" Jan 31 17:00:13 crc kubenswrapper[4769]: E0131 17:00:13.709147 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:00:16 crc kubenswrapper[4769]: I0131 17:00:16.709705 4769 scope.go:117] "RemoveContainer" containerID="56dbbc2c6cef8f7b323a242e8dd5e9ba07f61df13d525232f6325968322d2908" Jan 31 17:00:16 crc kubenswrapper[4769]: I0131 17:00:16.710134 4769 scope.go:117] "RemoveContainer" containerID="11bfe466448632be9f721cc3dce43ff9165322a0234d7b9e73930e16149a925a" Jan 31 17:00:16 crc kubenswrapper[4769]: I0131 17:00:16.710329 4769 scope.go:117] "RemoveContainer" containerID="2219c6addafd23e501bf6e1a777bd861123de973c9f43c63ee391384be6d7862" Jan 31 17:00:16 crc kubenswrapper[4769]: I0131 17:00:16.710437 4769 scope.go:117] "RemoveContainer" containerID="8707e97ccea91e915243fcf8abe6cf20acf92ad826357e257c0dfcc9e7f78794" Jan 31 17:00:16 crc kubenswrapper[4769]: E0131 17:00:16.710964 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:00:22 crc kubenswrapper[4769]: I0131 17:00:22.716188 4769 scope.go:117] "RemoveContainer" containerID="1ac6c3fca74fd84fdeae5ef2487d972493932dc0800386987645fa8647ffc868" Jan 31 17:00:22 crc kubenswrapper[4769]: I0131 17:00:22.716866 4769 scope.go:117] "RemoveContainer" containerID="f1f37e41e997dc24eb862caacca07d96d60555cbd8eb9630c5979470491c1bcc" Jan 31 17:00:22 crc kubenswrapper[4769]: E0131 17:00:22.717239 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:00:27 crc kubenswrapper[4769]: I0131 17:00:27.708601 4769 scope.go:117] "RemoveContainer" containerID="5953b98da67a838c445c5de169d2507557e859d1573f7aa06c047f960972389b" Jan 31 17:00:27 crc kubenswrapper[4769]: E0131 17:00:27.709316 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:00:31 crc kubenswrapper[4769]: I0131 17:00:31.709410 4769 scope.go:117] "RemoveContainer" containerID="56dbbc2c6cef8f7b323a242e8dd5e9ba07f61df13d525232f6325968322d2908" Jan 31 17:00:31 crc kubenswrapper[4769]: I0131 17:00:31.709868 4769 scope.go:117] "RemoveContainer" containerID="11bfe466448632be9f721cc3dce43ff9165322a0234d7b9e73930e16149a925a" Jan 31 17:00:31 crc kubenswrapper[4769]: I0131 17:00:31.710049 4769 scope.go:117] "RemoveContainer" containerID="2219c6addafd23e501bf6e1a777bd861123de973c9f43c63ee391384be6d7862" Jan 31 17:00:31 crc kubenswrapper[4769]: I0131 17:00:31.710115 4769 scope.go:117] "RemoveContainer" containerID="8707e97ccea91e915243fcf8abe6cf20acf92ad826357e257c0dfcc9e7f78794" Jan 31 17:00:32 crc kubenswrapper[4769]: I0131 17:00:32.754125 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="357f8d0d6ccbf36e63d549005f2f92eff94d00063c06552ab3b976fe07e21d79" exitCode=1 Jan 31 17:00:32 crc kubenswrapper[4769]: I0131 17:00:32.754168 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="6e00588bb54246ac0cd6934442f8185ffd164b8d39abd6a1b5a48cd745e6167f" exitCode=1 Jan 31 17:00:32 crc kubenswrapper[4769]: I0131 17:00:32.754191 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerStarted","Data":"b9558604778873ea776e72c4f76b2b177dc8fec6b51328e080419a4400cce4ce"} Jan 31 17:00:32 crc kubenswrapper[4769]: I0131 17:00:32.754218 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerStarted","Data":"0ef7de79971423fafcf7c18a8f98e935b4f871aede67e4e5105af970a207ece7"} Jan 31 17:00:32 crc kubenswrapper[4769]: I0131 17:00:32.754230 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"357f8d0d6ccbf36e63d549005f2f92eff94d00063c06552ab3b976fe07e21d79"} Jan 31 17:00:32 crc kubenswrapper[4769]: I0131 17:00:32.754245 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"6e00588bb54246ac0cd6934442f8185ffd164b8d39abd6a1b5a48cd745e6167f"} Jan 31 17:00:32 crc kubenswrapper[4769]: I0131 17:00:32.754264 4769 scope.go:117] "RemoveContainer" containerID="11bfe466448632be9f721cc3dce43ff9165322a0234d7b9e73930e16149a925a" Jan 31 17:00:32 crc kubenswrapper[4769]: I0131 17:00:32.754984 4769 scope.go:117] "RemoveContainer" containerID="6e00588bb54246ac0cd6934442f8185ffd164b8d39abd6a1b5a48cd745e6167f" Jan 31 17:00:32 crc kubenswrapper[4769]: I0131 17:00:32.755071 4769 scope.go:117] "RemoveContainer" containerID="357f8d0d6ccbf36e63d549005f2f92eff94d00063c06552ab3b976fe07e21d79" Jan 31 17:00:32 crc kubenswrapper[4769]: E0131 17:00:32.755604 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 
2m40s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:00:32 crc kubenswrapper[4769]: I0131 17:00:32.810631 4769 scope.go:117] "RemoveContainer" containerID="56dbbc2c6cef8f7b323a242e8dd5e9ba07f61df13d525232f6325968322d2908" Jan 31 17:00:33 crc kubenswrapper[4769]: I0131 17:00:33.777670 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="b9558604778873ea776e72c4f76b2b177dc8fec6b51328e080419a4400cce4ce" exitCode=1 Jan 31 17:00:33 crc kubenswrapper[4769]: I0131 17:00:33.777931 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="0ef7de79971423fafcf7c18a8f98e935b4f871aede67e4e5105af970a207ece7" exitCode=1 Jan 31 17:00:33 crc kubenswrapper[4769]: I0131 17:00:33.777954 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"b9558604778873ea776e72c4f76b2b177dc8fec6b51328e080419a4400cce4ce"} Jan 31 17:00:33 crc kubenswrapper[4769]: I0131 17:00:33.777978 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"0ef7de79971423fafcf7c18a8f98e935b4f871aede67e4e5105af970a207ece7"} Jan 31 17:00:33 crc kubenswrapper[4769]: I0131 17:00:33.777996 4769 scope.go:117] "RemoveContainer" containerID="8707e97ccea91e915243fcf8abe6cf20acf92ad826357e257c0dfcc9e7f78794" Jan 31 17:00:33 crc kubenswrapper[4769]: I0131 17:00:33.779576 4769 scope.go:117] "RemoveContainer" containerID="6e00588bb54246ac0cd6934442f8185ffd164b8d39abd6a1b5a48cd745e6167f" Jan 31 17:00:33 crc kubenswrapper[4769]: I0131 17:00:33.779637 4769 scope.go:117] "RemoveContainer" containerID="357f8d0d6ccbf36e63d549005f2f92eff94d00063c06552ab3b976fe07e21d79" Jan 31 17:00:33 crc kubenswrapper[4769]: I0131 17:00:33.779745 4769 scope.go:117] "RemoveContainer" containerID="0ef7de79971423fafcf7c18a8f98e935b4f871aede67e4e5105af970a207ece7" Jan 31 17:00:33 crc kubenswrapper[4769]: I0131 17:00:33.779806 4769 scope.go:117] "RemoveContainer" containerID="b9558604778873ea776e72c4f76b2b177dc8fec6b51328e080419a4400cce4ce" Jan 31 17:00:33 crc kubenswrapper[4769]: E0131 17:00:33.780134 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:00:33 crc 
kubenswrapper[4769]: I0131 17:00:33.817547 4769 scope.go:117] "RemoveContainer" containerID="2219c6addafd23e501bf6e1a777bd861123de973c9f43c63ee391384be6d7862" Jan 31 17:00:35 crc kubenswrapper[4769]: I0131 17:00:35.708835 4769 scope.go:117] "RemoveContainer" containerID="1ac6c3fca74fd84fdeae5ef2487d972493932dc0800386987645fa8647ffc868" Jan 31 17:00:35 crc kubenswrapper[4769]: I0131 17:00:35.709222 4769 scope.go:117] "RemoveContainer" containerID="f1f37e41e997dc24eb862caacca07d96d60555cbd8eb9630c5979470491c1bcc" Jan 31 17:00:35 crc kubenswrapper[4769]: E0131 17:00:35.709701 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:00:41 crc kubenswrapper[4769]: I0131 17:00:41.708704 4769 scope.go:117] "RemoveContainer" containerID="5953b98da67a838c445c5de169d2507557e859d1573f7aa06c047f960972389b" Jan 31 17:00:41 crc kubenswrapper[4769]: E0131 17:00:41.709443 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:00:47 crc kubenswrapper[4769]: I0131 17:00:47.710480 4769 scope.go:117] "RemoveContainer" containerID="6e00588bb54246ac0cd6934442f8185ffd164b8d39abd6a1b5a48cd745e6167f" Jan 31 17:00:47 crc kubenswrapper[4769]: I0131 17:00:47.711126 4769 scope.go:117] "RemoveContainer" containerID="357f8d0d6ccbf36e63d549005f2f92eff94d00063c06552ab3b976fe07e21d79" Jan 31 17:00:47 crc kubenswrapper[4769]: I0131 17:00:47.711362 4769 scope.go:117] "RemoveContainer" containerID="0ef7de79971423fafcf7c18a8f98e935b4f871aede67e4e5105af970a207ece7" Jan 31 17:00:47 crc kubenswrapper[4769]: I0131 17:00:47.711453 4769 scope.go:117] "RemoveContainer" containerID="b9558604778873ea776e72c4f76b2b177dc8fec6b51328e080419a4400cce4ce" Jan 31 17:00:47 crc kubenswrapper[4769]: E0131 17:00:47.712097 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-sharder 
pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:00:47 crc kubenswrapper[4769]: I0131 17:00:47.712603 4769 scope.go:117] "RemoveContainer" containerID="1ac6c3fca74fd84fdeae5ef2487d972493932dc0800386987645fa8647ffc868" Jan 31 17:00:47 crc kubenswrapper[4769]: I0131 17:00:47.712634 4769 scope.go:117] "RemoveContainer" containerID="f1f37e41e997dc24eb862caacca07d96d60555cbd8eb9630c5979470491c1bcc" Jan 31 17:00:47 crc kubenswrapper[4769]: E0131 17:00:47.713022 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:00:55 crc kubenswrapper[4769]: I0131 17:00:55.707951 4769 scope.go:117] "RemoveContainer" containerID="5953b98da67a838c445c5de169d2507557e859d1573f7aa06c047f960972389b" Jan 31 17:00:55 crc kubenswrapper[4769]: I0131 17:00:55.980561 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" event={"ID":"1d352f75-43f7-4b8c-867e-cfb17bbbe011","Type":"ContainerStarted","Data":"a1b936b54fa524976a558efe701fe594aef4952a8c4551ef3c20c1b46797e85b"} Jan 31 17:00:58 crc kubenswrapper[4769]: I0131 17:00:58.708870 4769 scope.go:117] "RemoveContainer" containerID="1ac6c3fca74fd84fdeae5ef2487d972493932dc0800386987645fa8647ffc868" Jan 31 17:00:58 crc kubenswrapper[4769]: I0131 17:00:58.709646 4769 scope.go:117] "RemoveContainer" containerID="f1f37e41e997dc24eb862caacca07d96d60555cbd8eb9630c5979470491c1bcc" Jan 31 17:00:58 crc kubenswrapper[4769]: E0131 17:00:58.710067 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:01:00 crc kubenswrapper[4769]: I0131 17:01:00.027718 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="a0b2ba6d51ec40d5d594dcf3fccf60ddfb8791afe72e3b8c322cf69d789ddbfd" exitCode=1 Jan 31 17:01:00 crc kubenswrapper[4769]: I0131 17:01:00.027835 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"a0b2ba6d51ec40d5d594dcf3fccf60ddfb8791afe72e3b8c322cf69d789ddbfd"} Jan 31 17:01:00 crc kubenswrapper[4769]: I0131 17:01:00.029111 4769 scope.go:117] "RemoveContainer" containerID="6e00588bb54246ac0cd6934442f8185ffd164b8d39abd6a1b5a48cd745e6167f" Jan 31 17:01:00 crc kubenswrapper[4769]: I0131 17:01:00.029277 4769 
scope.go:117] "RemoveContainer" containerID="357f8d0d6ccbf36e63d549005f2f92eff94d00063c06552ab3b976fe07e21d79" Jan 31 17:01:00 crc kubenswrapper[4769]: I0131 17:01:00.029334 4769 scope.go:117] "RemoveContainer" containerID="a0b2ba6d51ec40d5d594dcf3fccf60ddfb8791afe72e3b8c322cf69d789ddbfd" Jan 31 17:01:00 crc kubenswrapper[4769]: I0131 17:01:00.029482 4769 scope.go:117] "RemoveContainer" containerID="0ef7de79971423fafcf7c18a8f98e935b4f871aede67e4e5105af970a207ece7" Jan 31 17:01:00 crc kubenswrapper[4769]: I0131 17:01:00.029593 4769 scope.go:117] "RemoveContainer" containerID="b9558604778873ea776e72c4f76b2b177dc8fec6b51328e080419a4400cce4ce" Jan 31 17:01:00 crc kubenswrapper[4769]: I0131 17:01:00.156776 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/keystone-cron-29497981-l6wdw"] Jan 31 17:01:00 crc kubenswrapper[4769]: E0131 17:01:00.157083 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42c87dea-0d5e-4db3-b5bc-8585f184f1f6" containerName="collect-profiles" Jan 31 17:01:00 crc kubenswrapper[4769]: I0131 17:01:00.157113 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="42c87dea-0d5e-4db3-b5bc-8585f184f1f6" containerName="collect-profiles" Jan 31 17:01:00 crc kubenswrapper[4769]: I0131 17:01:00.157312 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="42c87dea-0d5e-4db3-b5bc-8585f184f1f6" containerName="collect-profiles" Jan 31 17:01:00 crc kubenswrapper[4769]: I0131 17:01:00.157854 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/keystone-cron-29497981-l6wdw" Jan 31 17:01:00 crc kubenswrapper[4769]: I0131 17:01:00.174892 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/keystone-cron-29497981-l6wdw"] Jan 31 17:01:00 crc kubenswrapper[4769]: I0131 17:01:00.227392 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58e62f4c-9b5c-49e1-a5b4-bde6e98c763e-config-data\") pod \"keystone-cron-29497981-l6wdw\" (UID: \"58e62f4c-9b5c-49e1-a5b4-bde6e98c763e\") " pod="swift-kuttl-tests/keystone-cron-29497981-l6wdw" Jan 31 17:01:00 crc kubenswrapper[4769]: I0131 17:01:00.227751 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vdb7c\" (UniqueName: \"kubernetes.io/projected/58e62f4c-9b5c-49e1-a5b4-bde6e98c763e-kube-api-access-vdb7c\") pod \"keystone-cron-29497981-l6wdw\" (UID: \"58e62f4c-9b5c-49e1-a5b4-bde6e98c763e\") " pod="swift-kuttl-tests/keystone-cron-29497981-l6wdw" Jan 31 17:01:00 crc kubenswrapper[4769]: I0131 17:01:00.227839 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/58e62f4c-9b5c-49e1-a5b4-bde6e98c763e-fernet-keys\") pod \"keystone-cron-29497981-l6wdw\" (UID: \"58e62f4c-9b5c-49e1-a5b4-bde6e98c763e\") " pod="swift-kuttl-tests/keystone-cron-29497981-l6wdw" Jan 31 17:01:00 crc kubenswrapper[4769]: E0131 17:01:00.288359 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator 
pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:01:00 crc kubenswrapper[4769]: I0131 17:01:00.329982 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58e62f4c-9b5c-49e1-a5b4-bde6e98c763e-config-data\") pod \"keystone-cron-29497981-l6wdw\" (UID: \"58e62f4c-9b5c-49e1-a5b4-bde6e98c763e\") " pod="swift-kuttl-tests/keystone-cron-29497981-l6wdw" Jan 31 17:01:00 crc kubenswrapper[4769]: I0131 17:01:00.330079 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vdb7c\" (UniqueName: \"kubernetes.io/projected/58e62f4c-9b5c-49e1-a5b4-bde6e98c763e-kube-api-access-vdb7c\") pod \"keystone-cron-29497981-l6wdw\" (UID: \"58e62f4c-9b5c-49e1-a5b4-bde6e98c763e\") " pod="swift-kuttl-tests/keystone-cron-29497981-l6wdw" Jan 31 17:01:00 crc kubenswrapper[4769]: I0131 17:01:00.330202 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/58e62f4c-9b5c-49e1-a5b4-bde6e98c763e-fernet-keys\") pod \"keystone-cron-29497981-l6wdw\" (UID: \"58e62f4c-9b5c-49e1-a5b4-bde6e98c763e\") " pod="swift-kuttl-tests/keystone-cron-29497981-l6wdw" Jan 31 17:01:00 crc kubenswrapper[4769]: I0131 17:01:00.340258 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/58e62f4c-9b5c-49e1-a5b4-bde6e98c763e-fernet-keys\") pod \"keystone-cron-29497981-l6wdw\" (UID: \"58e62f4c-9b5c-49e1-a5b4-bde6e98c763e\") " pod="swift-kuttl-tests/keystone-cron-29497981-l6wdw" Jan 31 17:01:00 crc kubenswrapper[4769]: I0131 17:01:00.341028 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58e62f4c-9b5c-49e1-a5b4-bde6e98c763e-config-data\") pod \"keystone-cron-29497981-l6wdw\" (UID: \"58e62f4c-9b5c-49e1-a5b4-bde6e98c763e\") " pod="swift-kuttl-tests/keystone-cron-29497981-l6wdw" Jan 31 17:01:00 crc kubenswrapper[4769]: I0131 17:01:00.346360 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vdb7c\" (UniqueName: \"kubernetes.io/projected/58e62f4c-9b5c-49e1-a5b4-bde6e98c763e-kube-api-access-vdb7c\") pod \"keystone-cron-29497981-l6wdw\" (UID: \"58e62f4c-9b5c-49e1-a5b4-bde6e98c763e\") " pod="swift-kuttl-tests/keystone-cron-29497981-l6wdw" Jan 31 17:01:00 crc kubenswrapper[4769]: I0131 17:01:00.522446 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/keystone-cron-29497981-l6wdw" Jan 31 17:01:00 crc kubenswrapper[4769]: I0131 17:01:00.999264 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/keystone-cron-29497981-l6wdw"] Jan 31 17:01:01 crc kubenswrapper[4769]: W0131 17:01:01.005270 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod58e62f4c_9b5c_49e1_a5b4_bde6e98c763e.slice/crio-27cf295f4ab745234914542da088f084cd6fe77a153167dbb0f1e7a2f457a8e2 WatchSource:0}: Error finding container 27cf295f4ab745234914542da088f084cd6fe77a153167dbb0f1e7a2f457a8e2: Status 404 returned error can't find the container with id 27cf295f4ab745234914542da088f084cd6fe77a153167dbb0f1e7a2f457a8e2 Jan 31 17:01:01 crc kubenswrapper[4769]: I0131 17:01:01.037737 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/keystone-cron-29497981-l6wdw" event={"ID":"58e62f4c-9b5c-49e1-a5b4-bde6e98c763e","Type":"ContainerStarted","Data":"27cf295f4ab745234914542da088f084cd6fe77a153167dbb0f1e7a2f457a8e2"} Jan 31 17:01:01 crc kubenswrapper[4769]: I0131 17:01:01.047174 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerStarted","Data":"855e4b559e0049ffffed208d1b0d85aa658628233a0b4672bb3a638bb95f6768"} Jan 31 17:01:01 crc kubenswrapper[4769]: I0131 17:01:01.047859 4769 scope.go:117] "RemoveContainer" containerID="6e00588bb54246ac0cd6934442f8185ffd164b8d39abd6a1b5a48cd745e6167f" Jan 31 17:01:01 crc kubenswrapper[4769]: I0131 17:01:01.047920 4769 scope.go:117] "RemoveContainer" containerID="357f8d0d6ccbf36e63d549005f2f92eff94d00063c06552ab3b976fe07e21d79" Jan 31 17:01:01 crc kubenswrapper[4769]: I0131 17:01:01.048005 4769 scope.go:117] "RemoveContainer" containerID="0ef7de79971423fafcf7c18a8f98e935b4f871aede67e4e5105af970a207ece7" Jan 31 17:01:01 crc kubenswrapper[4769]: I0131 17:01:01.048037 4769 scope.go:117] "RemoveContainer" containerID="b9558604778873ea776e72c4f76b2b177dc8fec6b51328e080419a4400cce4ce" Jan 31 17:01:01 crc kubenswrapper[4769]: E0131 17:01:01.048265 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:01:02 crc kubenswrapper[4769]: I0131 17:01:02.061706 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/keystone-cron-29497981-l6wdw" 
event={"ID":"58e62f4c-9b5c-49e1-a5b4-bde6e98c763e","Type":"ContainerStarted","Data":"ec67ddc72a85ddfc4af926c8e9deb55845a0e2862a8988b4d15474715261256f"} Jan 31 17:01:02 crc kubenswrapper[4769]: I0131 17:01:02.089134 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="swift-kuttl-tests/keystone-cron-29497981-l6wdw" podStartSLOduration=2.0891066560000002 podStartE2EDuration="2.089106656s" podCreationTimestamp="2026-01-31 17:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-31 17:01:02.085779687 +0000 UTC m=+1910.159948446" watchObservedRunningTime="2026-01-31 17:01:02.089106656 +0000 UTC m=+1910.163275365" Jan 31 17:01:03 crc kubenswrapper[4769]: I0131 17:01:03.070384 4769 generic.go:334] "Generic (PLEG): container finished" podID="58e62f4c-9b5c-49e1-a5b4-bde6e98c763e" containerID="ec67ddc72a85ddfc4af926c8e9deb55845a0e2862a8988b4d15474715261256f" exitCode=0 Jan 31 17:01:03 crc kubenswrapper[4769]: I0131 17:01:03.070421 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/keystone-cron-29497981-l6wdw" event={"ID":"58e62f4c-9b5c-49e1-a5b4-bde6e98c763e","Type":"ContainerDied","Data":"ec67ddc72a85ddfc4af926c8e9deb55845a0e2862a8988b4d15474715261256f"} Jan 31 17:01:04 crc kubenswrapper[4769]: I0131 17:01:04.391365 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/keystone-cron-29497981-l6wdw" Jan 31 17:01:04 crc kubenswrapper[4769]: I0131 17:01:04.507742 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vdb7c\" (UniqueName: \"kubernetes.io/projected/58e62f4c-9b5c-49e1-a5b4-bde6e98c763e-kube-api-access-vdb7c\") pod \"58e62f4c-9b5c-49e1-a5b4-bde6e98c763e\" (UID: \"58e62f4c-9b5c-49e1-a5b4-bde6e98c763e\") " Jan 31 17:01:04 crc kubenswrapper[4769]: I0131 17:01:04.507838 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/58e62f4c-9b5c-49e1-a5b4-bde6e98c763e-fernet-keys\") pod \"58e62f4c-9b5c-49e1-a5b4-bde6e98c763e\" (UID: \"58e62f4c-9b5c-49e1-a5b4-bde6e98c763e\") " Jan 31 17:01:04 crc kubenswrapper[4769]: I0131 17:01:04.507883 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58e62f4c-9b5c-49e1-a5b4-bde6e98c763e-config-data\") pod \"58e62f4c-9b5c-49e1-a5b4-bde6e98c763e\" (UID: \"58e62f4c-9b5c-49e1-a5b4-bde6e98c763e\") " Jan 31 17:01:04 crc kubenswrapper[4769]: I0131 17:01:04.512691 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/58e62f4c-9b5c-49e1-a5b4-bde6e98c763e-kube-api-access-vdb7c" (OuterVolumeSpecName: "kube-api-access-vdb7c") pod "58e62f4c-9b5c-49e1-a5b4-bde6e98c763e" (UID: "58e62f4c-9b5c-49e1-a5b4-bde6e98c763e"). InnerVolumeSpecName "kube-api-access-vdb7c". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 17:01:04 crc kubenswrapper[4769]: I0131 17:01:04.513859 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/58e62f4c-9b5c-49e1-a5b4-bde6e98c763e-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "58e62f4c-9b5c-49e1-a5b4-bde6e98c763e" (UID: "58e62f4c-9b5c-49e1-a5b4-bde6e98c763e"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 17:01:04 crc kubenswrapper[4769]: I0131 17:01:04.547792 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/58e62f4c-9b5c-49e1-a5b4-bde6e98c763e-config-data" (OuterVolumeSpecName: "config-data") pod "58e62f4c-9b5c-49e1-a5b4-bde6e98c763e" (UID: "58e62f4c-9b5c-49e1-a5b4-bde6e98c763e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 17:01:04 crc kubenswrapper[4769]: I0131 17:01:04.609471 4769 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/58e62f4c-9b5c-49e1-a5b4-bde6e98c763e-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 31 17:01:04 crc kubenswrapper[4769]: I0131 17:01:04.609531 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58e62f4c-9b5c-49e1-a5b4-bde6e98c763e-config-data\") on node \"crc\" DevicePath \"\"" Jan 31 17:01:04 crc kubenswrapper[4769]: I0131 17:01:04.609544 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vdb7c\" (UniqueName: \"kubernetes.io/projected/58e62f4c-9b5c-49e1-a5b4-bde6e98c763e-kube-api-access-vdb7c\") on node \"crc\" DevicePath \"\"" Jan 31 17:01:05 crc kubenswrapper[4769]: I0131 17:01:05.094217 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/keystone-cron-29497981-l6wdw" event={"ID":"58e62f4c-9b5c-49e1-a5b4-bde6e98c763e","Type":"ContainerDied","Data":"27cf295f4ab745234914542da088f084cd6fe77a153167dbb0f1e7a2f457a8e2"} Jan 31 17:01:05 crc kubenswrapper[4769]: I0131 17:01:05.094594 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="27cf295f4ab745234914542da088f084cd6fe77a153167dbb0f1e7a2f457a8e2" Jan 31 17:01:05 crc kubenswrapper[4769]: I0131 17:01:05.094341 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/keystone-cron-29497981-l6wdw" Jan 31 17:01:11 crc kubenswrapper[4769]: I0131 17:01:11.708890 4769 scope.go:117] "RemoveContainer" containerID="1ac6c3fca74fd84fdeae5ef2487d972493932dc0800386987645fa8647ffc868" Jan 31 17:01:11 crc kubenswrapper[4769]: I0131 17:01:11.709322 4769 scope.go:117] "RemoveContainer" containerID="f1f37e41e997dc24eb862caacca07d96d60555cbd8eb9630c5979470491c1bcc" Jan 31 17:01:11 crc kubenswrapper[4769]: E0131 17:01:11.709835 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:01:13 crc kubenswrapper[4769]: I0131 17:01:13.709646 4769 scope.go:117] "RemoveContainer" containerID="6e00588bb54246ac0cd6934442f8185ffd164b8d39abd6a1b5a48cd745e6167f" Jan 31 17:01:13 crc kubenswrapper[4769]: I0131 17:01:13.711009 4769 scope.go:117] "RemoveContainer" containerID="357f8d0d6ccbf36e63d549005f2f92eff94d00063c06552ab3b976fe07e21d79" Jan 31 17:01:13 crc kubenswrapper[4769]: I0131 17:01:13.711221 4769 scope.go:117] "RemoveContainer" containerID="0ef7de79971423fafcf7c18a8f98e935b4f871aede67e4e5105af970a207ece7" Jan 31 17:01:13 crc kubenswrapper[4769]: I0131 17:01:13.711354 4769 scope.go:117] "RemoveContainer" containerID="b9558604778873ea776e72c4f76b2b177dc8fec6b51328e080419a4400cce4ce" Jan 31 17:01:13 crc kubenswrapper[4769]: E0131 17:01:13.711862 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:01:16 crc kubenswrapper[4769]: I0131 17:01:16.250924 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices\") pod \"swift-ring-rebalance-2sjs2\" (UID: \"54c0116b-a027-4f11-8b6b-aa00778f1acb\") " pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 17:01:16 crc kubenswrapper[4769]: E0131 17:01:16.251093 4769 configmap.go:193] Couldn't get configMap swift-kuttl-tests/swift-ring-config-data: configmap "swift-ring-config-data" not found Jan 31 17:01:16 crc kubenswrapper[4769]: E0131 
17:01:16.251544 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices podName:54c0116b-a027-4f11-8b6b-aa00778f1acb nodeName:}" failed. No retries permitted until 2026-01-31 17:03:18.251519943 +0000 UTC m=+2046.325688622 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices") pod "swift-ring-rebalance-2sjs2" (UID: "54c0116b-a027-4f11-8b6b-aa00778f1acb") : configmap "swift-ring-config-data" not found Jan 31 17:01:19 crc kubenswrapper[4769]: E0131 17:01:19.025436 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ring-data-devices], unattached volumes=[], failed to process volumes=[]: context deadline exceeded" pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" podUID="54c0116b-a027-4f11-8b6b-aa00778f1acb" Jan 31 17:01:19 crc kubenswrapper[4769]: I0131 17:01:19.233804 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 17:01:26 crc kubenswrapper[4769]: I0131 17:01:26.709529 4769 scope.go:117] "RemoveContainer" containerID="1ac6c3fca74fd84fdeae5ef2487d972493932dc0800386987645fa8647ffc868" Jan 31 17:01:26 crc kubenswrapper[4769]: I0131 17:01:26.710345 4769 scope.go:117] "RemoveContainer" containerID="f1f37e41e997dc24eb862caacca07d96d60555cbd8eb9630c5979470491c1bcc" Jan 31 17:01:26 crc kubenswrapper[4769]: E0131 17:01:26.710779 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:01:28 crc kubenswrapper[4769]: I0131 17:01:28.709798 4769 scope.go:117] "RemoveContainer" containerID="6e00588bb54246ac0cd6934442f8185ffd164b8d39abd6a1b5a48cd745e6167f" Jan 31 17:01:28 crc kubenswrapper[4769]: I0131 17:01:28.710345 4769 scope.go:117] "RemoveContainer" containerID="357f8d0d6ccbf36e63d549005f2f92eff94d00063c06552ab3b976fe07e21d79" Jan 31 17:01:28 crc kubenswrapper[4769]: I0131 17:01:28.710641 4769 scope.go:117] "RemoveContainer" containerID="0ef7de79971423fafcf7c18a8f98e935b4f871aede67e4e5105af970a207ece7" Jan 31 17:01:28 crc kubenswrapper[4769]: I0131 17:01:28.710735 4769 scope.go:117] "RemoveContainer" containerID="b9558604778873ea776e72c4f76b2b177dc8fec6b51328e080419a4400cce4ce" Jan 31 17:01:28 crc kubenswrapper[4769]: E0131 17:01:28.711334 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 
2m40s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:01:41 crc kubenswrapper[4769]: I0131 17:01:41.708394 4769 scope.go:117] "RemoveContainer" containerID="1ac6c3fca74fd84fdeae5ef2487d972493932dc0800386987645fa8647ffc868" Jan 31 17:01:41 crc kubenswrapper[4769]: I0131 17:01:41.709681 4769 scope.go:117] "RemoveContainer" containerID="f1f37e41e997dc24eb862caacca07d96d60555cbd8eb9630c5979470491c1bcc" Jan 31 17:01:41 crc kubenswrapper[4769]: E0131 17:01:41.710292 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:01:42 crc kubenswrapper[4769]: I0131 17:01:42.716209 4769 scope.go:117] "RemoveContainer" containerID="6e00588bb54246ac0cd6934442f8185ffd164b8d39abd6a1b5a48cd745e6167f" Jan 31 17:01:42 crc kubenswrapper[4769]: I0131 17:01:42.716316 4769 scope.go:117] "RemoveContainer" containerID="357f8d0d6ccbf36e63d549005f2f92eff94d00063c06552ab3b976fe07e21d79" Jan 31 17:01:42 crc kubenswrapper[4769]: I0131 17:01:42.716433 4769 scope.go:117] "RemoveContainer" containerID="0ef7de79971423fafcf7c18a8f98e935b4f871aede67e4e5105af970a207ece7" Jan 31 17:01:42 crc kubenswrapper[4769]: I0131 17:01:42.716480 4769 scope.go:117] "RemoveContainer" containerID="b9558604778873ea776e72c4f76b2b177dc8fec6b51328e080419a4400cce4ce" Jan 31 17:01:42 crc kubenswrapper[4769]: E0131 17:01:42.716840 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:01:53 crc kubenswrapper[4769]: I0131 17:01:53.708207 4769 scope.go:117] "RemoveContainer" containerID="1ac6c3fca74fd84fdeae5ef2487d972493932dc0800386987645fa8647ffc868" Jan 31 17:01:53 crc kubenswrapper[4769]: I0131 17:01:53.708711 4769 scope.go:117] "RemoveContainer" 
containerID="f1f37e41e997dc24eb862caacca07d96d60555cbd8eb9630c5979470491c1bcc" Jan 31 17:01:53 crc kubenswrapper[4769]: E0131 17:01:53.708990 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:01:56 crc kubenswrapper[4769]: I0131 17:01:56.709564 4769 scope.go:117] "RemoveContainer" containerID="6e00588bb54246ac0cd6934442f8185ffd164b8d39abd6a1b5a48cd745e6167f" Jan 31 17:01:56 crc kubenswrapper[4769]: I0131 17:01:56.709991 4769 scope.go:117] "RemoveContainer" containerID="357f8d0d6ccbf36e63d549005f2f92eff94d00063c06552ab3b976fe07e21d79" Jan 31 17:01:56 crc kubenswrapper[4769]: I0131 17:01:56.710187 4769 scope.go:117] "RemoveContainer" containerID="0ef7de79971423fafcf7c18a8f98e935b4f871aede67e4e5105af970a207ece7" Jan 31 17:01:56 crc kubenswrapper[4769]: I0131 17:01:56.710259 4769 scope.go:117] "RemoveContainer" containerID="b9558604778873ea776e72c4f76b2b177dc8fec6b51328e080419a4400cce4ce" Jan 31 17:01:56 crc kubenswrapper[4769]: E0131 17:01:56.710962 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:02:06 crc kubenswrapper[4769]: I0131 17:02:06.708565 4769 scope.go:117] "RemoveContainer" containerID="1ac6c3fca74fd84fdeae5ef2487d972493932dc0800386987645fa8647ffc868" Jan 31 17:02:06 crc kubenswrapper[4769]: I0131 17:02:06.708928 4769 scope.go:117] "RemoveContainer" containerID="f1f37e41e997dc24eb862caacca07d96d60555cbd8eb9630c5979470491c1bcc" Jan 31 17:02:06 crc kubenswrapper[4769]: E0131 17:02:06.709115 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" 
podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:02:07 crc kubenswrapper[4769]: I0131 17:02:07.708799 4769 scope.go:117] "RemoveContainer" containerID="6e00588bb54246ac0cd6934442f8185ffd164b8d39abd6a1b5a48cd745e6167f" Jan 31 17:02:07 crc kubenswrapper[4769]: I0131 17:02:07.708865 4769 scope.go:117] "RemoveContainer" containerID="357f8d0d6ccbf36e63d549005f2f92eff94d00063c06552ab3b976fe07e21d79" Jan 31 17:02:07 crc kubenswrapper[4769]: I0131 17:02:07.708966 4769 scope.go:117] "RemoveContainer" containerID="0ef7de79971423fafcf7c18a8f98e935b4f871aede67e4e5105af970a207ece7" Jan 31 17:02:07 crc kubenswrapper[4769]: I0131 17:02:07.708998 4769 scope.go:117] "RemoveContainer" containerID="b9558604778873ea776e72c4f76b2b177dc8fec6b51328e080419a4400cce4ce" Jan 31 17:02:07 crc kubenswrapper[4769]: E0131 17:02:07.709246 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:02:18 crc kubenswrapper[4769]: I0131 17:02:18.708702 4769 scope.go:117] "RemoveContainer" containerID="1ac6c3fca74fd84fdeae5ef2487d972493932dc0800386987645fa8647ffc868" Jan 31 17:02:18 crc kubenswrapper[4769]: I0131 17:02:18.709390 4769 scope.go:117] "RemoveContainer" containerID="f1f37e41e997dc24eb862caacca07d96d60555cbd8eb9630c5979470491c1bcc" Jan 31 17:02:18 crc kubenswrapper[4769]: E0131 17:02:18.709911 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:02:20 crc kubenswrapper[4769]: I0131 17:02:20.709315 4769 scope.go:117] "RemoveContainer" containerID="6e00588bb54246ac0cd6934442f8185ffd164b8d39abd6a1b5a48cd745e6167f" Jan 31 17:02:20 crc kubenswrapper[4769]: I0131 17:02:20.709640 4769 scope.go:117] "RemoveContainer" containerID="357f8d0d6ccbf36e63d549005f2f92eff94d00063c06552ab3b976fe07e21d79" Jan 31 17:02:20 crc kubenswrapper[4769]: I0131 17:02:20.709937 4769 scope.go:117] "RemoveContainer" containerID="0ef7de79971423fafcf7c18a8f98e935b4f871aede67e4e5105af970a207ece7" Jan 31 17:02:20 crc kubenswrapper[4769]: I0131 17:02:20.710027 4769 scope.go:117] "RemoveContainer" 
containerID="b9558604778873ea776e72c4f76b2b177dc8fec6b51328e080419a4400cce4ce" Jan 31 17:02:20 crc kubenswrapper[4769]: E0131 17:02:20.710863 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:02:32 crc kubenswrapper[4769]: I0131 17:02:32.712986 4769 scope.go:117] "RemoveContainer" containerID="1ac6c3fca74fd84fdeae5ef2487d972493932dc0800386987645fa8647ffc868" Jan 31 17:02:32 crc kubenswrapper[4769]: I0131 17:02:32.713436 4769 scope.go:117] "RemoveContainer" containerID="f1f37e41e997dc24eb862caacca07d96d60555cbd8eb9630c5979470491c1bcc" Jan 31 17:02:32 crc kubenswrapper[4769]: E0131 17:02:32.713739 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:02:33 crc kubenswrapper[4769]: I0131 17:02:33.709123 4769 scope.go:117] "RemoveContainer" containerID="6e00588bb54246ac0cd6934442f8185ffd164b8d39abd6a1b5a48cd745e6167f" Jan 31 17:02:33 crc kubenswrapper[4769]: I0131 17:02:33.709197 4769 scope.go:117] "RemoveContainer" containerID="357f8d0d6ccbf36e63d549005f2f92eff94d00063c06552ab3b976fe07e21d79" Jan 31 17:02:33 crc kubenswrapper[4769]: I0131 17:02:33.709291 4769 scope.go:117] "RemoveContainer" containerID="0ef7de79971423fafcf7c18a8f98e935b4f871aede67e4e5105af970a207ece7" Jan 31 17:02:33 crc kubenswrapper[4769]: I0131 17:02:33.709325 4769 scope.go:117] "RemoveContainer" containerID="b9558604778873ea776e72c4f76b2b177dc8fec6b51328e080419a4400cce4ce" Jan 31 17:02:33 crc kubenswrapper[4769]: E0131 17:02:33.709638 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with 
CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:02:37 crc kubenswrapper[4769]: I0131 17:02:37.951928 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-cqqk6"] Jan 31 17:02:37 crc kubenswrapper[4769]: E0131 17:02:37.952689 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58e62f4c-9b5c-49e1-a5b4-bde6e98c763e" containerName="keystone-cron" Jan 31 17:02:37 crc kubenswrapper[4769]: I0131 17:02:37.952706 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="58e62f4c-9b5c-49e1-a5b4-bde6e98c763e" containerName="keystone-cron" Jan 31 17:02:37 crc kubenswrapper[4769]: I0131 17:02:37.952913 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="58e62f4c-9b5c-49e1-a5b4-bde6e98c763e" containerName="keystone-cron" Jan 31 17:02:37 crc kubenswrapper[4769]: I0131 17:02:37.954059 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-cqqk6" Jan 31 17:02:37 crc kubenswrapper[4769]: I0131 17:02:37.984159 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-cqqk6"] Jan 31 17:02:37 crc kubenswrapper[4769]: I0131 17:02:37.993980 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lhsd7\" (UniqueName: \"kubernetes.io/projected/726f9f9b-73f5-454a-b717-0488dc0680e1-kube-api-access-lhsd7\") pod \"community-operators-cqqk6\" (UID: \"726f9f9b-73f5-454a-b717-0488dc0680e1\") " pod="openshift-marketplace/community-operators-cqqk6" Jan 31 17:02:37 crc kubenswrapper[4769]: I0131 17:02:37.994053 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/726f9f9b-73f5-454a-b717-0488dc0680e1-utilities\") pod \"community-operators-cqqk6\" (UID: \"726f9f9b-73f5-454a-b717-0488dc0680e1\") " pod="openshift-marketplace/community-operators-cqqk6" Jan 31 17:02:37 crc kubenswrapper[4769]: I0131 17:02:37.994112 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/726f9f9b-73f5-454a-b717-0488dc0680e1-catalog-content\") pod \"community-operators-cqqk6\" (UID: \"726f9f9b-73f5-454a-b717-0488dc0680e1\") " pod="openshift-marketplace/community-operators-cqqk6" Jan 31 17:02:38 crc kubenswrapper[4769]: I0131 17:02:38.095114 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lhsd7\" (UniqueName: \"kubernetes.io/projected/726f9f9b-73f5-454a-b717-0488dc0680e1-kube-api-access-lhsd7\") pod \"community-operators-cqqk6\" (UID: \"726f9f9b-73f5-454a-b717-0488dc0680e1\") " pod="openshift-marketplace/community-operators-cqqk6" Jan 31 17:02:38 crc kubenswrapper[4769]: I0131 17:02:38.095166 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/726f9f9b-73f5-454a-b717-0488dc0680e1-utilities\") pod \"community-operators-cqqk6\" (UID: 
\"726f9f9b-73f5-454a-b717-0488dc0680e1\") " pod="openshift-marketplace/community-operators-cqqk6" Jan 31 17:02:38 crc kubenswrapper[4769]: I0131 17:02:38.095202 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/726f9f9b-73f5-454a-b717-0488dc0680e1-catalog-content\") pod \"community-operators-cqqk6\" (UID: \"726f9f9b-73f5-454a-b717-0488dc0680e1\") " pod="openshift-marketplace/community-operators-cqqk6" Jan 31 17:02:38 crc kubenswrapper[4769]: I0131 17:02:38.095766 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/726f9f9b-73f5-454a-b717-0488dc0680e1-catalog-content\") pod \"community-operators-cqqk6\" (UID: \"726f9f9b-73f5-454a-b717-0488dc0680e1\") " pod="openshift-marketplace/community-operators-cqqk6" Jan 31 17:02:38 crc kubenswrapper[4769]: I0131 17:02:38.096033 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/726f9f9b-73f5-454a-b717-0488dc0680e1-utilities\") pod \"community-operators-cqqk6\" (UID: \"726f9f9b-73f5-454a-b717-0488dc0680e1\") " pod="openshift-marketplace/community-operators-cqqk6" Jan 31 17:02:38 crc kubenswrapper[4769]: I0131 17:02:38.114552 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lhsd7\" (UniqueName: \"kubernetes.io/projected/726f9f9b-73f5-454a-b717-0488dc0680e1-kube-api-access-lhsd7\") pod \"community-operators-cqqk6\" (UID: \"726f9f9b-73f5-454a-b717-0488dc0680e1\") " pod="openshift-marketplace/community-operators-cqqk6" Jan 31 17:02:38 crc kubenswrapper[4769]: I0131 17:02:38.145674 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-ss8hg"] Jan 31 17:02:38 crc kubenswrapper[4769]: I0131 17:02:38.147069 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ss8hg" Jan 31 17:02:38 crc kubenswrapper[4769]: I0131 17:02:38.164309 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ss8hg"] Jan 31 17:02:38 crc kubenswrapper[4769]: I0131 17:02:38.196070 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-km76j\" (UniqueName: \"kubernetes.io/projected/bf681b0e-4c5b-45d6-a26c-3f013bc4f427-kube-api-access-km76j\") pod \"redhat-marketplace-ss8hg\" (UID: \"bf681b0e-4c5b-45d6-a26c-3f013bc4f427\") " pod="openshift-marketplace/redhat-marketplace-ss8hg" Jan 31 17:02:38 crc kubenswrapper[4769]: I0131 17:02:38.196142 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bf681b0e-4c5b-45d6-a26c-3f013bc4f427-utilities\") pod \"redhat-marketplace-ss8hg\" (UID: \"bf681b0e-4c5b-45d6-a26c-3f013bc4f427\") " pod="openshift-marketplace/redhat-marketplace-ss8hg" Jan 31 17:02:38 crc kubenswrapper[4769]: I0131 17:02:38.196178 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bf681b0e-4c5b-45d6-a26c-3f013bc4f427-catalog-content\") pod \"redhat-marketplace-ss8hg\" (UID: \"bf681b0e-4c5b-45d6-a26c-3f013bc4f427\") " pod="openshift-marketplace/redhat-marketplace-ss8hg" Jan 31 17:02:38 crc kubenswrapper[4769]: I0131 17:02:38.283859 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-cqqk6" Jan 31 17:02:38 crc kubenswrapper[4769]: I0131 17:02:38.297555 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bf681b0e-4c5b-45d6-a26c-3f013bc4f427-catalog-content\") pod \"redhat-marketplace-ss8hg\" (UID: \"bf681b0e-4c5b-45d6-a26c-3f013bc4f427\") " pod="openshift-marketplace/redhat-marketplace-ss8hg" Jan 31 17:02:38 crc kubenswrapper[4769]: I0131 17:02:38.297751 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-km76j\" (UniqueName: \"kubernetes.io/projected/bf681b0e-4c5b-45d6-a26c-3f013bc4f427-kube-api-access-km76j\") pod \"redhat-marketplace-ss8hg\" (UID: \"bf681b0e-4c5b-45d6-a26c-3f013bc4f427\") " pod="openshift-marketplace/redhat-marketplace-ss8hg" Jan 31 17:02:38 crc kubenswrapper[4769]: I0131 17:02:38.297849 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bf681b0e-4c5b-45d6-a26c-3f013bc4f427-utilities\") pod \"redhat-marketplace-ss8hg\" (UID: \"bf681b0e-4c5b-45d6-a26c-3f013bc4f427\") " pod="openshift-marketplace/redhat-marketplace-ss8hg" Jan 31 17:02:38 crc kubenswrapper[4769]: I0131 17:02:38.298300 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bf681b0e-4c5b-45d6-a26c-3f013bc4f427-catalog-content\") pod \"redhat-marketplace-ss8hg\" (UID: \"bf681b0e-4c5b-45d6-a26c-3f013bc4f427\") " pod="openshift-marketplace/redhat-marketplace-ss8hg" Jan 31 17:02:38 crc kubenswrapper[4769]: I0131 17:02:38.298317 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bf681b0e-4c5b-45d6-a26c-3f013bc4f427-utilities\") pod \"redhat-marketplace-ss8hg\" (UID: \"bf681b0e-4c5b-45d6-a26c-3f013bc4f427\") " pod="openshift-marketplace/redhat-marketplace-ss8hg" Jan 31 17:02:38 crc kubenswrapper[4769]: I0131 17:02:38.319061 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-km76j\" (UniqueName: \"kubernetes.io/projected/bf681b0e-4c5b-45d6-a26c-3f013bc4f427-kube-api-access-km76j\") pod \"redhat-marketplace-ss8hg\" (UID: \"bf681b0e-4c5b-45d6-a26c-3f013bc4f427\") " pod="openshift-marketplace/redhat-marketplace-ss8hg" Jan 31 17:02:38 crc kubenswrapper[4769]: I0131 17:02:38.472301 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ss8hg" Jan 31 17:02:38 crc kubenswrapper[4769]: I0131 17:02:38.554839 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-cqqk6"] Jan 31 17:02:39 crc kubenswrapper[4769]: I0131 17:02:38.920473 4769 generic.go:334] "Generic (PLEG): container finished" podID="726f9f9b-73f5-454a-b717-0488dc0680e1" containerID="889e738211bc0b5e5c86dc64d41c5876bf0236bd225b1c60bc03aa3ac9798f1a" exitCode=0 Jan 31 17:02:39 crc kubenswrapper[4769]: I0131 17:02:38.920553 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cqqk6" event={"ID":"726f9f9b-73f5-454a-b717-0488dc0680e1","Type":"ContainerDied","Data":"889e738211bc0b5e5c86dc64d41c5876bf0236bd225b1c60bc03aa3ac9798f1a"} Jan 31 17:02:39 crc kubenswrapper[4769]: I0131 17:02:38.921004 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cqqk6" event={"ID":"726f9f9b-73f5-454a-b717-0488dc0680e1","Type":"ContainerStarted","Data":"fa76c55bd49a13b7b3d655b835a5c870fabd5f802068e81e3210725bc99d35a2"} Jan 31 17:02:39 crc kubenswrapper[4769]: I0131 17:02:38.923005 4769 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 31 17:02:39 crc kubenswrapper[4769]: I0131 17:02:39.513306 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ss8hg"] Jan 31 17:02:39 crc kubenswrapper[4769]: W0131 17:02:39.521083 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbf681b0e_4c5b_45d6_a26c_3f013bc4f427.slice/crio-5f102477226690f5bdaa38fb5fc0969c85e8a49a699e794e36d7c1fea76251a4 WatchSource:0}: Error finding container 5f102477226690f5bdaa38fb5fc0969c85e8a49a699e794e36d7c1fea76251a4: Status 404 returned error can't find the container with id 5f102477226690f5bdaa38fb5fc0969c85e8a49a699e794e36d7c1fea76251a4 Jan 31 17:02:39 crc kubenswrapper[4769]: I0131 17:02:39.930523 4769 generic.go:334] "Generic (PLEG): container finished" podID="bf681b0e-4c5b-45d6-a26c-3f013bc4f427" containerID="4e0b303bcfe7d1814a4eb3a0cfab3e457fdc578aa0b6a66d248a1c288de3b36d" exitCode=0 Jan 31 17:02:39 crc kubenswrapper[4769]: I0131 17:02:39.930599 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ss8hg" event={"ID":"bf681b0e-4c5b-45d6-a26c-3f013bc4f427","Type":"ContainerDied","Data":"4e0b303bcfe7d1814a4eb3a0cfab3e457fdc578aa0b6a66d248a1c288de3b36d"} Jan 31 17:02:39 crc kubenswrapper[4769]: I0131 17:02:39.930657 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ss8hg" event={"ID":"bf681b0e-4c5b-45d6-a26c-3f013bc4f427","Type":"ContainerStarted","Data":"5f102477226690f5bdaa38fb5fc0969c85e8a49a699e794e36d7c1fea76251a4"} Jan 31 17:02:39 crc kubenswrapper[4769]: I0131 17:02:39.933527 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cqqk6" event={"ID":"726f9f9b-73f5-454a-b717-0488dc0680e1","Type":"ContainerStarted","Data":"529946674b63d557d649fa7c7109cb85ed9f75f650520ee032df048e727acb7b"} Jan 31 17:02:40 crc kubenswrapper[4769]: I0131 17:02:40.952213 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ss8hg" 
event={"ID":"bf681b0e-4c5b-45d6-a26c-3f013bc4f427","Type":"ContainerStarted","Data":"90ca2a5b6414ae1547210db2a50923371f0f0850a79e5b351340b458bf09e263"} Jan 31 17:02:40 crc kubenswrapper[4769]: I0131 17:02:40.954933 4769 generic.go:334] "Generic (PLEG): container finished" podID="726f9f9b-73f5-454a-b717-0488dc0680e1" containerID="529946674b63d557d649fa7c7109cb85ed9f75f650520ee032df048e727acb7b" exitCode=0 Jan 31 17:02:40 crc kubenswrapper[4769]: I0131 17:02:40.954984 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cqqk6" event={"ID":"726f9f9b-73f5-454a-b717-0488dc0680e1","Type":"ContainerDied","Data":"529946674b63d557d649fa7c7109cb85ed9f75f650520ee032df048e727acb7b"} Jan 31 17:02:41 crc kubenswrapper[4769]: I0131 17:02:41.964428 4769 generic.go:334] "Generic (PLEG): container finished" podID="bf681b0e-4c5b-45d6-a26c-3f013bc4f427" containerID="90ca2a5b6414ae1547210db2a50923371f0f0850a79e5b351340b458bf09e263" exitCode=0 Jan 31 17:02:41 crc kubenswrapper[4769]: I0131 17:02:41.964530 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ss8hg" event={"ID":"bf681b0e-4c5b-45d6-a26c-3f013bc4f427","Type":"ContainerDied","Data":"90ca2a5b6414ae1547210db2a50923371f0f0850a79e5b351340b458bf09e263"} Jan 31 17:02:41 crc kubenswrapper[4769]: I0131 17:02:41.967571 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cqqk6" event={"ID":"726f9f9b-73f5-454a-b717-0488dc0680e1","Type":"ContainerStarted","Data":"009eef4e46441c73bc89dc947a46330014949ef5cd58229292107c69a95d8ba0"} Jan 31 17:02:42 crc kubenswrapper[4769]: I0131 17:02:42.003688 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-cqqk6" podStartSLOduration=2.572423743 podStartE2EDuration="5.003674022s" podCreationTimestamp="2026-01-31 17:02:37 +0000 UTC" firstStartedPulling="2026-01-31 17:02:38.922694742 +0000 UTC m=+2006.996863411" lastFinishedPulling="2026-01-31 17:02:41.353945021 +0000 UTC m=+2009.428113690" observedRunningTime="2026-01-31 17:02:42.00096928 +0000 UTC m=+2010.075137969" watchObservedRunningTime="2026-01-31 17:02:42.003674022 +0000 UTC m=+2010.077842681" Jan 31 17:02:42 crc kubenswrapper[4769]: I0131 17:02:42.979512 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ss8hg" event={"ID":"bf681b0e-4c5b-45d6-a26c-3f013bc4f427","Type":"ContainerStarted","Data":"a601a003704860b22c96c8e5023c592625d036587e9bd051892815ca118ee35d"} Jan 31 17:02:42 crc kubenswrapper[4769]: I0131 17:02:42.997830 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-ss8hg" podStartSLOduration=2.416873632 podStartE2EDuration="4.997815425s" podCreationTimestamp="2026-01-31 17:02:38 +0000 UTC" firstStartedPulling="2026-01-31 17:02:39.931935619 +0000 UTC m=+2008.006104288" lastFinishedPulling="2026-01-31 17:02:42.512877412 +0000 UTC m=+2010.587046081" observedRunningTime="2026-01-31 17:02:42.99465986 +0000 UTC m=+2011.068828539" watchObservedRunningTime="2026-01-31 17:02:42.997815425 +0000 UTC m=+2011.071984094" Jan 31 17:02:45 crc kubenswrapper[4769]: I0131 17:02:45.708246 4769 scope.go:117] "RemoveContainer" containerID="1ac6c3fca74fd84fdeae5ef2487d972493932dc0800386987645fa8647ffc868" Jan 31 17:02:45 crc kubenswrapper[4769]: I0131 17:02:45.708589 4769 scope.go:117] "RemoveContainer" 
containerID="f1f37e41e997dc24eb862caacca07d96d60555cbd8eb9630c5979470491c1bcc" Jan 31 17:02:45 crc kubenswrapper[4769]: E0131 17:02:45.866557 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:02:45 crc kubenswrapper[4769]: I0131 17:02:45.999474 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerStarted","Data":"3646ad1992c210e8bf551ccff3f4a82477556311e79cf13f4103fcc1edb9f1a4"} Jan 31 17:02:46 crc kubenswrapper[4769]: I0131 17:02:46.000010 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 17:02:46 crc kubenswrapper[4769]: I0131 17:02:46.001264 4769 scope.go:117] "RemoveContainer" containerID="1ac6c3fca74fd84fdeae5ef2487d972493932dc0800386987645fa8647ffc868" Jan 31 17:02:46 crc kubenswrapper[4769]: E0131 17:02:46.001627 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:02:47 crc kubenswrapper[4769]: I0131 17:02:47.015630 4769 generic.go:334] "Generic (PLEG): container finished" podID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerID="3646ad1992c210e8bf551ccff3f4a82477556311e79cf13f4103fcc1edb9f1a4" exitCode=1 Jan 31 17:02:47 crc kubenswrapper[4769]: I0131 17:02:47.015685 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerDied","Data":"3646ad1992c210e8bf551ccff3f4a82477556311e79cf13f4103fcc1edb9f1a4"} Jan 31 17:02:47 crc kubenswrapper[4769]: I0131 17:02:47.015724 4769 scope.go:117] "RemoveContainer" containerID="f1f37e41e997dc24eb862caacca07d96d60555cbd8eb9630c5979470491c1bcc" Jan 31 17:02:47 crc kubenswrapper[4769]: I0131 17:02:47.018828 4769 scope.go:117] "RemoveContainer" containerID="1ac6c3fca74fd84fdeae5ef2487d972493932dc0800386987645fa8647ffc868" Jan 31 17:02:47 crc kubenswrapper[4769]: I0131 17:02:47.018915 4769 scope.go:117] "RemoveContainer" containerID="3646ad1992c210e8bf551ccff3f4a82477556311e79cf13f4103fcc1edb9f1a4" Jan 31 17:02:47 crc kubenswrapper[4769]: E0131 17:02:47.019491 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:02:47 crc kubenswrapper[4769]: I0131 17:02:47.644830 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" 
status="unhealthy" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 17:02:47 crc kubenswrapper[4769]: I0131 17:02:47.709674 4769 scope.go:117] "RemoveContainer" containerID="6e00588bb54246ac0cd6934442f8185ffd164b8d39abd6a1b5a48cd745e6167f" Jan 31 17:02:47 crc kubenswrapper[4769]: I0131 17:02:47.709840 4769 scope.go:117] "RemoveContainer" containerID="357f8d0d6ccbf36e63d549005f2f92eff94d00063c06552ab3b976fe07e21d79" Jan 31 17:02:47 crc kubenswrapper[4769]: I0131 17:02:47.710034 4769 scope.go:117] "RemoveContainer" containerID="0ef7de79971423fafcf7c18a8f98e935b4f871aede67e4e5105af970a207ece7" Jan 31 17:02:47 crc kubenswrapper[4769]: I0131 17:02:47.710115 4769 scope.go:117] "RemoveContainer" containerID="b9558604778873ea776e72c4f76b2b177dc8fec6b51328e080419a4400cce4ce" Jan 31 17:02:47 crc kubenswrapper[4769]: E0131 17:02:47.710763 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:02:48 crc kubenswrapper[4769]: I0131 17:02:48.031165 4769 scope.go:117] "RemoveContainer" containerID="1ac6c3fca74fd84fdeae5ef2487d972493932dc0800386987645fa8647ffc868" Jan 31 17:02:48 crc kubenswrapper[4769]: I0131 17:02:48.031209 4769 scope.go:117] "RemoveContainer" containerID="3646ad1992c210e8bf551ccff3f4a82477556311e79cf13f4103fcc1edb9f1a4" Jan 31 17:02:48 crc kubenswrapper[4769]: E0131 17:02:48.031718 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:02:48 crc kubenswrapper[4769]: I0131 17:02:48.284421 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-cqqk6" Jan 31 17:02:48 crc kubenswrapper[4769]: I0131 17:02:48.285853 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-cqqk6" Jan 31 17:02:48 crc kubenswrapper[4769]: I0131 17:02:48.358657 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-cqqk6" Jan 31 17:02:48 crc kubenswrapper[4769]: I0131 17:02:48.472879 4769 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-ss8hg" Jan 31 17:02:48 crc kubenswrapper[4769]: I0131 17:02:48.472958 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-ss8hg" Jan 31 17:02:48 crc kubenswrapper[4769]: I0131 17:02:48.537676 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-ss8hg" Jan 31 17:02:49 crc kubenswrapper[4769]: I0131 17:02:49.040570 4769 scope.go:117] "RemoveContainer" containerID="1ac6c3fca74fd84fdeae5ef2487d972493932dc0800386987645fa8647ffc868" Jan 31 17:02:49 crc kubenswrapper[4769]: I0131 17:02:49.040636 4769 scope.go:117] "RemoveContainer" containerID="3646ad1992c210e8bf551ccff3f4a82477556311e79cf13f4103fcc1edb9f1a4" Jan 31 17:02:49 crc kubenswrapper[4769]: E0131 17:02:49.041093 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:02:49 crc kubenswrapper[4769]: I0131 17:02:49.113533 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-cqqk6" Jan 31 17:02:49 crc kubenswrapper[4769]: I0131 17:02:49.119169 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-ss8hg" Jan 31 17:02:51 crc kubenswrapper[4769]: I0131 17:02:51.936781 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-cqqk6"] Jan 31 17:02:52 crc kubenswrapper[4769]: I0131 17:02:52.071404 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-cqqk6" podUID="726f9f9b-73f5-454a-b717-0488dc0680e1" containerName="registry-server" containerID="cri-o://009eef4e46441c73bc89dc947a46330014949ef5cd58229292107c69a95d8ba0" gracePeriod=2 Jan 31 17:02:52 crc kubenswrapper[4769]: I0131 17:02:52.338214 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ss8hg"] Jan 31 17:02:52 crc kubenswrapper[4769]: I0131 17:02:52.338728 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-ss8hg" podUID="bf681b0e-4c5b-45d6-a26c-3f013bc4f427" containerName="registry-server" containerID="cri-o://a601a003704860b22c96c8e5023c592625d036587e9bd051892815ca118ee35d" gracePeriod=2 Jan 31 17:02:52 crc kubenswrapper[4769]: I0131 17:02:52.516528 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-cqqk6" Jan 31 17:02:52 crc kubenswrapper[4769]: I0131 17:02:52.652230 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/726f9f9b-73f5-454a-b717-0488dc0680e1-utilities\") pod \"726f9f9b-73f5-454a-b717-0488dc0680e1\" (UID: \"726f9f9b-73f5-454a-b717-0488dc0680e1\") " Jan 31 17:02:52 crc kubenswrapper[4769]: I0131 17:02:52.652331 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/726f9f9b-73f5-454a-b717-0488dc0680e1-catalog-content\") pod \"726f9f9b-73f5-454a-b717-0488dc0680e1\" (UID: \"726f9f9b-73f5-454a-b717-0488dc0680e1\") " Jan 31 17:02:52 crc kubenswrapper[4769]: I0131 17:02:52.652477 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lhsd7\" (UniqueName: \"kubernetes.io/projected/726f9f9b-73f5-454a-b717-0488dc0680e1-kube-api-access-lhsd7\") pod \"726f9f9b-73f5-454a-b717-0488dc0680e1\" (UID: \"726f9f9b-73f5-454a-b717-0488dc0680e1\") " Jan 31 17:02:52 crc kubenswrapper[4769]: I0131 17:02:52.653196 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/726f9f9b-73f5-454a-b717-0488dc0680e1-utilities" (OuterVolumeSpecName: "utilities") pod "726f9f9b-73f5-454a-b717-0488dc0680e1" (UID: "726f9f9b-73f5-454a-b717-0488dc0680e1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 17:02:52 crc kubenswrapper[4769]: I0131 17:02:52.665225 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/726f9f9b-73f5-454a-b717-0488dc0680e1-kube-api-access-lhsd7" (OuterVolumeSpecName: "kube-api-access-lhsd7") pod "726f9f9b-73f5-454a-b717-0488dc0680e1" (UID: "726f9f9b-73f5-454a-b717-0488dc0680e1"). InnerVolumeSpecName "kube-api-access-lhsd7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 17:02:52 crc kubenswrapper[4769]: I0131 17:02:52.704218 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/726f9f9b-73f5-454a-b717-0488dc0680e1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "726f9f9b-73f5-454a-b717-0488dc0680e1" (UID: "726f9f9b-73f5-454a-b717-0488dc0680e1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 17:02:52 crc kubenswrapper[4769]: I0131 17:02:52.718323 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ss8hg" Jan 31 17:02:52 crc kubenswrapper[4769]: I0131 17:02:52.754532 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lhsd7\" (UniqueName: \"kubernetes.io/projected/726f9f9b-73f5-454a-b717-0488dc0680e1-kube-api-access-lhsd7\") on node \"crc\" DevicePath \"\"" Jan 31 17:02:52 crc kubenswrapper[4769]: I0131 17:02:52.754561 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/726f9f9b-73f5-454a-b717-0488dc0680e1-utilities\") on node \"crc\" DevicePath \"\"" Jan 31 17:02:52 crc kubenswrapper[4769]: I0131 17:02:52.754570 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/726f9f9b-73f5-454a-b717-0488dc0680e1-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 31 17:02:52 crc kubenswrapper[4769]: I0131 17:02:52.855440 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bf681b0e-4c5b-45d6-a26c-3f013bc4f427-utilities\") pod \"bf681b0e-4c5b-45d6-a26c-3f013bc4f427\" (UID: \"bf681b0e-4c5b-45d6-a26c-3f013bc4f427\") " Jan 31 17:02:52 crc kubenswrapper[4769]: I0131 17:02:52.855512 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-km76j\" (UniqueName: \"kubernetes.io/projected/bf681b0e-4c5b-45d6-a26c-3f013bc4f427-kube-api-access-km76j\") pod \"bf681b0e-4c5b-45d6-a26c-3f013bc4f427\" (UID: \"bf681b0e-4c5b-45d6-a26c-3f013bc4f427\") " Jan 31 17:02:52 crc kubenswrapper[4769]: I0131 17:02:52.855591 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bf681b0e-4c5b-45d6-a26c-3f013bc4f427-catalog-content\") pod \"bf681b0e-4c5b-45d6-a26c-3f013bc4f427\" (UID: \"bf681b0e-4c5b-45d6-a26c-3f013bc4f427\") " Jan 31 17:02:52 crc kubenswrapper[4769]: I0131 17:02:52.856127 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bf681b0e-4c5b-45d6-a26c-3f013bc4f427-utilities" (OuterVolumeSpecName: "utilities") pod "bf681b0e-4c5b-45d6-a26c-3f013bc4f427" (UID: "bf681b0e-4c5b-45d6-a26c-3f013bc4f427"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 17:02:52 crc kubenswrapper[4769]: I0131 17:02:52.859347 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf681b0e-4c5b-45d6-a26c-3f013bc4f427-kube-api-access-km76j" (OuterVolumeSpecName: "kube-api-access-km76j") pod "bf681b0e-4c5b-45d6-a26c-3f013bc4f427" (UID: "bf681b0e-4c5b-45d6-a26c-3f013bc4f427"). InnerVolumeSpecName "kube-api-access-km76j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 17:02:52 crc kubenswrapper[4769]: I0131 17:02:52.874636 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bf681b0e-4c5b-45d6-a26c-3f013bc4f427-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bf681b0e-4c5b-45d6-a26c-3f013bc4f427" (UID: "bf681b0e-4c5b-45d6-a26c-3f013bc4f427"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 17:02:52 crc kubenswrapper[4769]: I0131 17:02:52.957462 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bf681b0e-4c5b-45d6-a26c-3f013bc4f427-utilities\") on node \"crc\" DevicePath \"\"" Jan 31 17:02:52 crc kubenswrapper[4769]: I0131 17:02:52.957778 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-km76j\" (UniqueName: \"kubernetes.io/projected/bf681b0e-4c5b-45d6-a26c-3f013bc4f427-kube-api-access-km76j\") on node \"crc\" DevicePath \"\"" Jan 31 17:02:52 crc kubenswrapper[4769]: I0131 17:02:52.957791 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bf681b0e-4c5b-45d6-a26c-3f013bc4f427-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 31 17:02:53 crc kubenswrapper[4769]: I0131 17:02:53.083246 4769 generic.go:334] "Generic (PLEG): container finished" podID="bf681b0e-4c5b-45d6-a26c-3f013bc4f427" containerID="a601a003704860b22c96c8e5023c592625d036587e9bd051892815ca118ee35d" exitCode=0 Jan 31 17:02:53 crc kubenswrapper[4769]: I0131 17:02:53.083314 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ss8hg" event={"ID":"bf681b0e-4c5b-45d6-a26c-3f013bc4f427","Type":"ContainerDied","Data":"a601a003704860b22c96c8e5023c592625d036587e9bd051892815ca118ee35d"} Jan 31 17:02:53 crc kubenswrapper[4769]: I0131 17:02:53.083387 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ss8hg" event={"ID":"bf681b0e-4c5b-45d6-a26c-3f013bc4f427","Type":"ContainerDied","Data":"5f102477226690f5bdaa38fb5fc0969c85e8a49a699e794e36d7c1fea76251a4"} Jan 31 17:02:53 crc kubenswrapper[4769]: I0131 17:02:53.083429 4769 scope.go:117] "RemoveContainer" containerID="a601a003704860b22c96c8e5023c592625d036587e9bd051892815ca118ee35d" Jan 31 17:02:53 crc kubenswrapper[4769]: I0131 17:02:53.083818 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ss8hg" Jan 31 17:02:53 crc kubenswrapper[4769]: I0131 17:02:53.086969 4769 generic.go:334] "Generic (PLEG): container finished" podID="726f9f9b-73f5-454a-b717-0488dc0680e1" containerID="009eef4e46441c73bc89dc947a46330014949ef5cd58229292107c69a95d8ba0" exitCode=0 Jan 31 17:02:53 crc kubenswrapper[4769]: I0131 17:02:53.087004 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cqqk6" event={"ID":"726f9f9b-73f5-454a-b717-0488dc0680e1","Type":"ContainerDied","Data":"009eef4e46441c73bc89dc947a46330014949ef5cd58229292107c69a95d8ba0"} Jan 31 17:02:53 crc kubenswrapper[4769]: I0131 17:02:53.087028 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cqqk6" event={"ID":"726f9f9b-73f5-454a-b717-0488dc0680e1","Type":"ContainerDied","Data":"fa76c55bd49a13b7b3d655b835a5c870fabd5f802068e81e3210725bc99d35a2"} Jan 31 17:02:53 crc kubenswrapper[4769]: I0131 17:02:53.087052 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-cqqk6" Jan 31 17:02:53 crc kubenswrapper[4769]: I0131 17:02:53.119238 4769 scope.go:117] "RemoveContainer" containerID="90ca2a5b6414ae1547210db2a50923371f0f0850a79e5b351340b458bf09e263" Jan 31 17:02:53 crc kubenswrapper[4769]: I0131 17:02:53.120979 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-cqqk6"] Jan 31 17:02:53 crc kubenswrapper[4769]: I0131 17:02:53.138942 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-cqqk6"] Jan 31 17:02:53 crc kubenswrapper[4769]: I0131 17:02:53.145675 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ss8hg"] Jan 31 17:02:53 crc kubenswrapper[4769]: I0131 17:02:53.151836 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-ss8hg"] Jan 31 17:02:53 crc kubenswrapper[4769]: I0131 17:02:53.168466 4769 scope.go:117] "RemoveContainer" containerID="4e0b303bcfe7d1814a4eb3a0cfab3e457fdc578aa0b6a66d248a1c288de3b36d" Jan 31 17:02:53 crc kubenswrapper[4769]: I0131 17:02:53.191930 4769 scope.go:117] "RemoveContainer" containerID="a601a003704860b22c96c8e5023c592625d036587e9bd051892815ca118ee35d" Jan 31 17:02:53 crc kubenswrapper[4769]: E0131 17:02:53.192602 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a601a003704860b22c96c8e5023c592625d036587e9bd051892815ca118ee35d\": container with ID starting with a601a003704860b22c96c8e5023c592625d036587e9bd051892815ca118ee35d not found: ID does not exist" containerID="a601a003704860b22c96c8e5023c592625d036587e9bd051892815ca118ee35d" Jan 31 17:02:53 crc kubenswrapper[4769]: I0131 17:02:53.192686 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a601a003704860b22c96c8e5023c592625d036587e9bd051892815ca118ee35d"} err="failed to get container status \"a601a003704860b22c96c8e5023c592625d036587e9bd051892815ca118ee35d\": rpc error: code = NotFound desc = could not find container \"a601a003704860b22c96c8e5023c592625d036587e9bd051892815ca118ee35d\": container with ID starting with a601a003704860b22c96c8e5023c592625d036587e9bd051892815ca118ee35d not found: ID does not exist" Jan 31 17:02:53 crc kubenswrapper[4769]: I0131 17:02:53.192774 4769 scope.go:117] "RemoveContainer" containerID="90ca2a5b6414ae1547210db2a50923371f0f0850a79e5b351340b458bf09e263" Jan 31 17:02:53 crc kubenswrapper[4769]: E0131 17:02:53.193619 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"90ca2a5b6414ae1547210db2a50923371f0f0850a79e5b351340b458bf09e263\": container with ID starting with 90ca2a5b6414ae1547210db2a50923371f0f0850a79e5b351340b458bf09e263 not found: ID does not exist" containerID="90ca2a5b6414ae1547210db2a50923371f0f0850a79e5b351340b458bf09e263" Jan 31 17:02:53 crc kubenswrapper[4769]: I0131 17:02:53.193675 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"90ca2a5b6414ae1547210db2a50923371f0f0850a79e5b351340b458bf09e263"} err="failed to get container status \"90ca2a5b6414ae1547210db2a50923371f0f0850a79e5b351340b458bf09e263\": rpc error: code = NotFound desc = could not find container \"90ca2a5b6414ae1547210db2a50923371f0f0850a79e5b351340b458bf09e263\": container with ID starting with 
90ca2a5b6414ae1547210db2a50923371f0f0850a79e5b351340b458bf09e263 not found: ID does not exist" Jan 31 17:02:53 crc kubenswrapper[4769]: I0131 17:02:53.193720 4769 scope.go:117] "RemoveContainer" containerID="4e0b303bcfe7d1814a4eb3a0cfab3e457fdc578aa0b6a66d248a1c288de3b36d" Jan 31 17:02:53 crc kubenswrapper[4769]: E0131 17:02:53.194072 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4e0b303bcfe7d1814a4eb3a0cfab3e457fdc578aa0b6a66d248a1c288de3b36d\": container with ID starting with 4e0b303bcfe7d1814a4eb3a0cfab3e457fdc578aa0b6a66d248a1c288de3b36d not found: ID does not exist" containerID="4e0b303bcfe7d1814a4eb3a0cfab3e457fdc578aa0b6a66d248a1c288de3b36d" Jan 31 17:02:53 crc kubenswrapper[4769]: I0131 17:02:53.194124 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4e0b303bcfe7d1814a4eb3a0cfab3e457fdc578aa0b6a66d248a1c288de3b36d"} err="failed to get container status \"4e0b303bcfe7d1814a4eb3a0cfab3e457fdc578aa0b6a66d248a1c288de3b36d\": rpc error: code = NotFound desc = could not find container \"4e0b303bcfe7d1814a4eb3a0cfab3e457fdc578aa0b6a66d248a1c288de3b36d\": container with ID starting with 4e0b303bcfe7d1814a4eb3a0cfab3e457fdc578aa0b6a66d248a1c288de3b36d not found: ID does not exist" Jan 31 17:02:53 crc kubenswrapper[4769]: I0131 17:02:53.194152 4769 scope.go:117] "RemoveContainer" containerID="009eef4e46441c73bc89dc947a46330014949ef5cd58229292107c69a95d8ba0" Jan 31 17:02:53 crc kubenswrapper[4769]: I0131 17:02:53.245725 4769 scope.go:117] "RemoveContainer" containerID="529946674b63d557d649fa7c7109cb85ed9f75f650520ee032df048e727acb7b" Jan 31 17:02:53 crc kubenswrapper[4769]: I0131 17:02:53.268800 4769 scope.go:117] "RemoveContainer" containerID="889e738211bc0b5e5c86dc64d41c5876bf0236bd225b1c60bc03aa3ac9798f1a" Jan 31 17:02:53 crc kubenswrapper[4769]: I0131 17:02:53.292429 4769 scope.go:117] "RemoveContainer" containerID="009eef4e46441c73bc89dc947a46330014949ef5cd58229292107c69a95d8ba0" Jan 31 17:02:53 crc kubenswrapper[4769]: E0131 17:02:53.293167 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"009eef4e46441c73bc89dc947a46330014949ef5cd58229292107c69a95d8ba0\": container with ID starting with 009eef4e46441c73bc89dc947a46330014949ef5cd58229292107c69a95d8ba0 not found: ID does not exist" containerID="009eef4e46441c73bc89dc947a46330014949ef5cd58229292107c69a95d8ba0" Jan 31 17:02:53 crc kubenswrapper[4769]: I0131 17:02:53.293358 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"009eef4e46441c73bc89dc947a46330014949ef5cd58229292107c69a95d8ba0"} err="failed to get container status \"009eef4e46441c73bc89dc947a46330014949ef5cd58229292107c69a95d8ba0\": rpc error: code = NotFound desc = could not find container \"009eef4e46441c73bc89dc947a46330014949ef5cd58229292107c69a95d8ba0\": container with ID starting with 009eef4e46441c73bc89dc947a46330014949ef5cd58229292107c69a95d8ba0 not found: ID does not exist" Jan 31 17:02:53 crc kubenswrapper[4769]: I0131 17:02:53.293556 4769 scope.go:117] "RemoveContainer" containerID="529946674b63d557d649fa7c7109cb85ed9f75f650520ee032df048e727acb7b" Jan 31 17:02:53 crc kubenswrapper[4769]: E0131 17:02:53.294106 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"529946674b63d557d649fa7c7109cb85ed9f75f650520ee032df048e727acb7b\": container 
with ID starting with 529946674b63d557d649fa7c7109cb85ed9f75f650520ee032df048e727acb7b not found: ID does not exist" containerID="529946674b63d557d649fa7c7109cb85ed9f75f650520ee032df048e727acb7b" Jan 31 17:02:53 crc kubenswrapper[4769]: I0131 17:02:53.294194 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"529946674b63d557d649fa7c7109cb85ed9f75f650520ee032df048e727acb7b"} err="failed to get container status \"529946674b63d557d649fa7c7109cb85ed9f75f650520ee032df048e727acb7b\": rpc error: code = NotFound desc = could not find container \"529946674b63d557d649fa7c7109cb85ed9f75f650520ee032df048e727acb7b\": container with ID starting with 529946674b63d557d649fa7c7109cb85ed9f75f650520ee032df048e727acb7b not found: ID does not exist" Jan 31 17:02:53 crc kubenswrapper[4769]: I0131 17:02:53.294239 4769 scope.go:117] "RemoveContainer" containerID="889e738211bc0b5e5c86dc64d41c5876bf0236bd225b1c60bc03aa3ac9798f1a" Jan 31 17:02:53 crc kubenswrapper[4769]: E0131 17:02:53.294722 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"889e738211bc0b5e5c86dc64d41c5876bf0236bd225b1c60bc03aa3ac9798f1a\": container with ID starting with 889e738211bc0b5e5c86dc64d41c5876bf0236bd225b1c60bc03aa3ac9798f1a not found: ID does not exist" containerID="889e738211bc0b5e5c86dc64d41c5876bf0236bd225b1c60bc03aa3ac9798f1a" Jan 31 17:02:53 crc kubenswrapper[4769]: I0131 17:02:53.294898 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"889e738211bc0b5e5c86dc64d41c5876bf0236bd225b1c60bc03aa3ac9798f1a"} err="failed to get container status \"889e738211bc0b5e5c86dc64d41c5876bf0236bd225b1c60bc03aa3ac9798f1a\": rpc error: code = NotFound desc = could not find container \"889e738211bc0b5e5c86dc64d41c5876bf0236bd225b1c60bc03aa3ac9798f1a\": container with ID starting with 889e738211bc0b5e5c86dc64d41c5876bf0236bd225b1c60bc03aa3ac9798f1a not found: ID does not exist" Jan 31 17:02:54 crc kubenswrapper[4769]: I0131 17:02:54.718465 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="726f9f9b-73f5-454a-b717-0488dc0680e1" path="/var/lib/kubelet/pods/726f9f9b-73f5-454a-b717-0488dc0680e1/volumes" Jan 31 17:02:54 crc kubenswrapper[4769]: I0131 17:02:54.719567 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf681b0e-4c5b-45d6-a26c-3f013bc4f427" path="/var/lib/kubelet/pods/bf681b0e-4c5b-45d6-a26c-3f013bc4f427/volumes" Jan 31 17:02:58 crc kubenswrapper[4769]: I0131 17:02:58.708784 4769 scope.go:117] "RemoveContainer" containerID="6e00588bb54246ac0cd6934442f8185ffd164b8d39abd6a1b5a48cd745e6167f" Jan 31 17:02:58 crc kubenswrapper[4769]: I0131 17:02:58.709440 4769 scope.go:117] "RemoveContainer" containerID="357f8d0d6ccbf36e63d549005f2f92eff94d00063c06552ab3b976fe07e21d79" Jan 31 17:02:58 crc kubenswrapper[4769]: I0131 17:02:58.709647 4769 scope.go:117] "RemoveContainer" containerID="0ef7de79971423fafcf7c18a8f98e935b4f871aede67e4e5105af970a207ece7" Jan 31 17:02:58 crc kubenswrapper[4769]: I0131 17:02:58.709716 4769 scope.go:117] "RemoveContainer" containerID="b9558604778873ea776e72c4f76b2b177dc8fec6b51328e080419a4400cce4ce" Jan 31 17:02:58 crc kubenswrapper[4769]: E0131 17:02:58.710185 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator 
pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:03:00 crc kubenswrapper[4769]: I0131 17:03:00.708316 4769 scope.go:117] "RemoveContainer" containerID="1ac6c3fca74fd84fdeae5ef2487d972493932dc0800386987645fa8647ffc868" Jan 31 17:03:00 crc kubenswrapper[4769]: I0131 17:03:00.708374 4769 scope.go:117] "RemoveContainer" containerID="3646ad1992c210e8bf551ccff3f4a82477556311e79cf13f4103fcc1edb9f1a4" Jan 31 17:03:00 crc kubenswrapper[4769]: E0131 17:03:00.709038 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:03:12 crc kubenswrapper[4769]: I0131 17:03:12.722278 4769 scope.go:117] "RemoveContainer" containerID="6e00588bb54246ac0cd6934442f8185ffd164b8d39abd6a1b5a48cd745e6167f" Jan 31 17:03:12 crc kubenswrapper[4769]: I0131 17:03:12.723091 4769 scope.go:117] "RemoveContainer" containerID="357f8d0d6ccbf36e63d549005f2f92eff94d00063c06552ab3b976fe07e21d79" Jan 31 17:03:12 crc kubenswrapper[4769]: I0131 17:03:12.723214 4769 scope.go:117] "RemoveContainer" containerID="0ef7de79971423fafcf7c18a8f98e935b4f871aede67e4e5105af970a207ece7" Jan 31 17:03:12 crc kubenswrapper[4769]: I0131 17:03:12.723258 4769 scope.go:117] "RemoveContainer" containerID="b9558604778873ea776e72c4f76b2b177dc8fec6b51328e080419a4400cce4ce" Jan 31 17:03:13 crc kubenswrapper[4769]: I0131 17:03:13.271911 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="7737b4ea15d66ecd3a1f0162917edc283af061057e4e073e1b0c4d285ababc8a" exitCode=1 Jan 31 17:03:13 crc kubenswrapper[4769]: I0131 17:03:13.271957 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerStarted","Data":"62946bc30fa0ff5c4e547df146dd6e5243ff6ef16c16250218e2568d4be097ae"} Jan 31 17:03:13 crc kubenswrapper[4769]: I0131 17:03:13.272237 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"7737b4ea15d66ecd3a1f0162917edc283af061057e4e073e1b0c4d285ababc8a"} Jan 31 17:03:13 crc kubenswrapper[4769]: I0131 17:03:13.272267 4769 scope.go:117] "RemoveContainer" 
containerID="6e00588bb54246ac0cd6934442f8185ffd164b8d39abd6a1b5a48cd745e6167f" Jan 31 17:03:14 crc kubenswrapper[4769]: I0131 17:03:14.294102 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="3763e21c60b42d0d544a22b54f711243a20cd6d855d0b802a15c29ee20e6ebb4" exitCode=1 Jan 31 17:03:14 crc kubenswrapper[4769]: I0131 17:03:14.294514 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="62946bc30fa0ff5c4e547df146dd6e5243ff6ef16c16250218e2568d4be097ae" exitCode=1 Jan 31 17:03:14 crc kubenswrapper[4769]: I0131 17:03:14.294536 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="38c57a6952923d6f8bb33f11c84f339256f80eb4733efa1a6c4093e5a7b785ad" exitCode=1 Jan 31 17:03:14 crc kubenswrapper[4769]: I0131 17:03:14.294172 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"3763e21c60b42d0d544a22b54f711243a20cd6d855d0b802a15c29ee20e6ebb4"} Jan 31 17:03:14 crc kubenswrapper[4769]: I0131 17:03:14.294614 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"62946bc30fa0ff5c4e547df146dd6e5243ff6ef16c16250218e2568d4be097ae"} Jan 31 17:03:14 crc kubenswrapper[4769]: I0131 17:03:14.294641 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"38c57a6952923d6f8bb33f11c84f339256f80eb4733efa1a6c4093e5a7b785ad"} Jan 31 17:03:14 crc kubenswrapper[4769]: I0131 17:03:14.294668 4769 scope.go:117] "RemoveContainer" containerID="0ef7de79971423fafcf7c18a8f98e935b4f871aede67e4e5105af970a207ece7" Jan 31 17:03:14 crc kubenswrapper[4769]: I0131 17:03:14.295399 4769 scope.go:117] "RemoveContainer" containerID="7737b4ea15d66ecd3a1f0162917edc283af061057e4e073e1b0c4d285ababc8a" Jan 31 17:03:14 crc kubenswrapper[4769]: I0131 17:03:14.295607 4769 scope.go:117] "RemoveContainer" containerID="62946bc30fa0ff5c4e547df146dd6e5243ff6ef16c16250218e2568d4be097ae" Jan 31 17:03:14 crc kubenswrapper[4769]: I0131 17:03:14.295849 4769 scope.go:117] "RemoveContainer" containerID="3763e21c60b42d0d544a22b54f711243a20cd6d855d0b802a15c29ee20e6ebb4" Jan 31 17:03:14 crc kubenswrapper[4769]: I0131 17:03:14.295962 4769 scope.go:117] "RemoveContainer" containerID="38c57a6952923d6f8bb33f11c84f339256f80eb4733efa1a6c4093e5a7b785ad" Jan 31 17:03:14 crc kubenswrapper[4769]: E0131 17:03:14.296560 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting 
failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:03:14 crc kubenswrapper[4769]: I0131 17:03:14.362232 4769 scope.go:117] "RemoveContainer" containerID="357f8d0d6ccbf36e63d549005f2f92eff94d00063c06552ab3b976fe07e21d79" Jan 31 17:03:14 crc kubenswrapper[4769]: I0131 17:03:14.406891 4769 scope.go:117] "RemoveContainer" containerID="b9558604778873ea776e72c4f76b2b177dc8fec6b51328e080419a4400cce4ce" Jan 31 17:03:14 crc kubenswrapper[4769]: I0131 17:03:14.708795 4769 scope.go:117] "RemoveContainer" containerID="1ac6c3fca74fd84fdeae5ef2487d972493932dc0800386987645fa8647ffc868" Jan 31 17:03:14 crc kubenswrapper[4769]: I0131 17:03:14.708852 4769 scope.go:117] "RemoveContainer" containerID="3646ad1992c210e8bf551ccff3f4a82477556311e79cf13f4103fcc1edb9f1a4" Jan 31 17:03:14 crc kubenswrapper[4769]: E0131 17:03:14.709319 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:03:15 crc kubenswrapper[4769]: I0131 17:03:15.314877 4769 scope.go:117] "RemoveContainer" containerID="7737b4ea15d66ecd3a1f0162917edc283af061057e4e073e1b0c4d285ababc8a" Jan 31 17:03:15 crc kubenswrapper[4769]: I0131 17:03:15.315054 4769 scope.go:117] "RemoveContainer" containerID="62946bc30fa0ff5c4e547df146dd6e5243ff6ef16c16250218e2568d4be097ae" Jan 31 17:03:15 crc kubenswrapper[4769]: I0131 17:03:15.315325 4769 scope.go:117] "RemoveContainer" containerID="3763e21c60b42d0d544a22b54f711243a20cd6d855d0b802a15c29ee20e6ebb4" Jan 31 17:03:15 crc kubenswrapper[4769]: I0131 17:03:15.315417 4769 scope.go:117] "RemoveContainer" containerID="38c57a6952923d6f8bb33f11c84f339256f80eb4733efa1a6c4093e5a7b785ad" Jan 31 17:03:15 crc kubenswrapper[4769]: E0131 17:03:15.316032 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:03:18 crc kubenswrapper[4769]: I0131 17:03:18.279057 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices\") pod \"swift-ring-rebalance-2sjs2\" (UID: \"54c0116b-a027-4f11-8b6b-aa00778f1acb\") " pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 17:03:18 crc kubenswrapper[4769]: E0131 17:03:18.279302 4769 configmap.go:193] Couldn't get configMap swift-kuttl-tests/swift-ring-config-data: configmap "swift-ring-config-data" not found Jan 31 17:03:18 crc kubenswrapper[4769]: E0131 17:03:18.279694 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices podName:54c0116b-a027-4f11-8b6b-aa00778f1acb nodeName:}" failed. No retries permitted until 2026-01-31 17:05:20.279656878 +0000 UTC m=+2168.353825587 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices") pod "swift-ring-rebalance-2sjs2" (UID: "54c0116b-a027-4f11-8b6b-aa00778f1acb") : configmap "swift-ring-config-data" not found Jan 31 17:03:20 crc kubenswrapper[4769]: I0131 17:03:20.682437 4769 patch_prober.go:28] interesting pod/machine-config-daemon-4bqbm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 31 17:03:20 crc kubenswrapper[4769]: I0131 17:03:20.682877 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 31 17:03:21 crc kubenswrapper[4769]: I0131 17:03:21.352303 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-vrjlv"] Jan 31 17:03:21 crc kubenswrapper[4769]: E0131 17:03:21.353764 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="726f9f9b-73f5-454a-b717-0488dc0680e1" containerName="extract-utilities" Jan 31 17:03:21 crc kubenswrapper[4769]: I0131 17:03:21.353798 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="726f9f9b-73f5-454a-b717-0488dc0680e1" containerName="extract-utilities" Jan 31 17:03:21 crc kubenswrapper[4769]: E0131 17:03:21.353819 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="726f9f9b-73f5-454a-b717-0488dc0680e1" containerName="extract-content" Jan 31 17:03:21 crc kubenswrapper[4769]: I0131 17:03:21.353831 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="726f9f9b-73f5-454a-b717-0488dc0680e1" containerName="extract-content" Jan 31 17:03:21 crc kubenswrapper[4769]: E0131 17:03:21.353853 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf681b0e-4c5b-45d6-a26c-3f013bc4f427" containerName="registry-server" Jan 31 17:03:21 crc kubenswrapper[4769]: I0131 17:03:21.353865 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf681b0e-4c5b-45d6-a26c-3f013bc4f427" containerName="registry-server" Jan 31 17:03:21 crc kubenswrapper[4769]: E0131 17:03:21.353898 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf681b0e-4c5b-45d6-a26c-3f013bc4f427" containerName="extract-utilities" Jan 31 17:03:21 crc kubenswrapper[4769]: I0131 17:03:21.353909 4769 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="bf681b0e-4c5b-45d6-a26c-3f013bc4f427" containerName="extract-utilities" Jan 31 17:03:21 crc kubenswrapper[4769]: E0131 17:03:21.353923 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="726f9f9b-73f5-454a-b717-0488dc0680e1" containerName="registry-server" Jan 31 17:03:21 crc kubenswrapper[4769]: I0131 17:03:21.353933 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="726f9f9b-73f5-454a-b717-0488dc0680e1" containerName="registry-server" Jan 31 17:03:21 crc kubenswrapper[4769]: E0131 17:03:21.353945 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf681b0e-4c5b-45d6-a26c-3f013bc4f427" containerName="extract-content" Jan 31 17:03:21 crc kubenswrapper[4769]: I0131 17:03:21.353956 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf681b0e-4c5b-45d6-a26c-3f013bc4f427" containerName="extract-content" Jan 31 17:03:21 crc kubenswrapper[4769]: I0131 17:03:21.354163 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="726f9f9b-73f5-454a-b717-0488dc0680e1" containerName="registry-server" Jan 31 17:03:21 crc kubenswrapper[4769]: I0131 17:03:21.354205 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf681b0e-4c5b-45d6-a26c-3f013bc4f427" containerName="registry-server" Jan 31 17:03:21 crc kubenswrapper[4769]: I0131 17:03:21.355459 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vrjlv" Jan 31 17:03:21 crc kubenswrapper[4769]: I0131 17:03:21.379460 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vrjlv"] Jan 31 17:03:21 crc kubenswrapper[4769]: I0131 17:03:21.435201 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zkcvb\" (UniqueName: \"kubernetes.io/projected/a60394d0-ff2f-4f1b-b772-974bf5c1d627-kube-api-access-zkcvb\") pod \"certified-operators-vrjlv\" (UID: \"a60394d0-ff2f-4f1b-b772-974bf5c1d627\") " pod="openshift-marketplace/certified-operators-vrjlv" Jan 31 17:03:21 crc kubenswrapper[4769]: I0131 17:03:21.435310 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a60394d0-ff2f-4f1b-b772-974bf5c1d627-catalog-content\") pod \"certified-operators-vrjlv\" (UID: \"a60394d0-ff2f-4f1b-b772-974bf5c1d627\") " pod="openshift-marketplace/certified-operators-vrjlv" Jan 31 17:03:21 crc kubenswrapper[4769]: I0131 17:03:21.435397 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a60394d0-ff2f-4f1b-b772-974bf5c1d627-utilities\") pod \"certified-operators-vrjlv\" (UID: \"a60394d0-ff2f-4f1b-b772-974bf5c1d627\") " pod="openshift-marketplace/certified-operators-vrjlv" Jan 31 17:03:21 crc kubenswrapper[4769]: I0131 17:03:21.537192 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a60394d0-ff2f-4f1b-b772-974bf5c1d627-utilities\") pod \"certified-operators-vrjlv\" (UID: \"a60394d0-ff2f-4f1b-b772-974bf5c1d627\") " pod="openshift-marketplace/certified-operators-vrjlv" Jan 31 17:03:21 crc kubenswrapper[4769]: I0131 17:03:21.537423 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zkcvb\" (UniqueName: \"kubernetes.io/projected/a60394d0-ff2f-4f1b-b772-974bf5c1d627-kube-api-access-zkcvb\") pod 
\"certified-operators-vrjlv\" (UID: \"a60394d0-ff2f-4f1b-b772-974bf5c1d627\") " pod="openshift-marketplace/certified-operators-vrjlv" Jan 31 17:03:21 crc kubenswrapper[4769]: I0131 17:03:21.537471 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a60394d0-ff2f-4f1b-b772-974bf5c1d627-catalog-content\") pod \"certified-operators-vrjlv\" (UID: \"a60394d0-ff2f-4f1b-b772-974bf5c1d627\") " pod="openshift-marketplace/certified-operators-vrjlv" Jan 31 17:03:21 crc kubenswrapper[4769]: I0131 17:03:21.537922 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a60394d0-ff2f-4f1b-b772-974bf5c1d627-utilities\") pod \"certified-operators-vrjlv\" (UID: \"a60394d0-ff2f-4f1b-b772-974bf5c1d627\") " pod="openshift-marketplace/certified-operators-vrjlv" Jan 31 17:03:21 crc kubenswrapper[4769]: I0131 17:03:21.538126 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a60394d0-ff2f-4f1b-b772-974bf5c1d627-catalog-content\") pod \"certified-operators-vrjlv\" (UID: \"a60394d0-ff2f-4f1b-b772-974bf5c1d627\") " pod="openshift-marketplace/certified-operators-vrjlv" Jan 31 17:03:21 crc kubenswrapper[4769]: I0131 17:03:21.578616 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zkcvb\" (UniqueName: \"kubernetes.io/projected/a60394d0-ff2f-4f1b-b772-974bf5c1d627-kube-api-access-zkcvb\") pod \"certified-operators-vrjlv\" (UID: \"a60394d0-ff2f-4f1b-b772-974bf5c1d627\") " pod="openshift-marketplace/certified-operators-vrjlv" Jan 31 17:03:21 crc kubenswrapper[4769]: I0131 17:03:21.688236 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vrjlv" Jan 31 17:03:21 crc kubenswrapper[4769]: I0131 17:03:21.948425 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vrjlv"] Jan 31 17:03:22 crc kubenswrapper[4769]: E0131 17:03:22.235725 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ring-data-devices], unattached volumes=[], failed to process volumes=[]: context deadline exceeded" pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" podUID="54c0116b-a027-4f11-8b6b-aa00778f1acb" Jan 31 17:03:22 crc kubenswrapper[4769]: I0131 17:03:22.419936 4769 generic.go:334] "Generic (PLEG): container finished" podID="a60394d0-ff2f-4f1b-b772-974bf5c1d627" containerID="ff180f1c2284be788c44ed29c2cb603592e080ac17c8f337c52ab66737b3e418" exitCode=0 Jan 31 17:03:22 crc kubenswrapper[4769]: I0131 17:03:22.420048 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 17:03:22 crc kubenswrapper[4769]: I0131 17:03:22.421645 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vrjlv" event={"ID":"a60394d0-ff2f-4f1b-b772-974bf5c1d627","Type":"ContainerDied","Data":"ff180f1c2284be788c44ed29c2cb603592e080ac17c8f337c52ab66737b3e418"} Jan 31 17:03:22 crc kubenswrapper[4769]: I0131 17:03:22.421708 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vrjlv" event={"ID":"a60394d0-ff2f-4f1b-b772-974bf5c1d627","Type":"ContainerStarted","Data":"bb8baf72e207d49fc36700bea5ac9d6bb5150b95dfebec35670648088e065879"} Jan 31 17:03:23 crc kubenswrapper[4769]: I0131 17:03:23.431610 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vrjlv" event={"ID":"a60394d0-ff2f-4f1b-b772-974bf5c1d627","Type":"ContainerStarted","Data":"c2599f8c804458f433ea8f3663e5f48779c255b3151a30513859b7dc90ebe3ba"} Jan 31 17:03:24 crc kubenswrapper[4769]: I0131 17:03:24.438852 4769 generic.go:334] "Generic (PLEG): container finished" podID="a60394d0-ff2f-4f1b-b772-974bf5c1d627" containerID="c2599f8c804458f433ea8f3663e5f48779c255b3151a30513859b7dc90ebe3ba" exitCode=0 Jan 31 17:03:24 crc kubenswrapper[4769]: I0131 17:03:24.438887 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vrjlv" event={"ID":"a60394d0-ff2f-4f1b-b772-974bf5c1d627","Type":"ContainerDied","Data":"c2599f8c804458f433ea8f3663e5f48779c255b3151a30513859b7dc90ebe3ba"} Jan 31 17:03:25 crc kubenswrapper[4769]: I0131 17:03:25.449762 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vrjlv" event={"ID":"a60394d0-ff2f-4f1b-b772-974bf5c1d627","Type":"ContainerStarted","Data":"9aa1f20d55e65f83903b62c8fd08a79165b05503c22406c6fc8538d6adb486bc"} Jan 31 17:03:25 crc kubenswrapper[4769]: I0131 17:03:25.474480 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-vrjlv" podStartSLOduration=2.083494521 podStartE2EDuration="4.474460595s" podCreationTimestamp="2026-01-31 17:03:21 +0000 UTC" firstStartedPulling="2026-01-31 17:03:22.424177734 +0000 UTC m=+2050.498346413" lastFinishedPulling="2026-01-31 17:03:24.815143818 +0000 UTC m=+2052.889312487" observedRunningTime="2026-01-31 17:03:25.469090121 +0000 UTC m=+2053.543258820" watchObservedRunningTime="2026-01-31 17:03:25.474460595 +0000 UTC m=+2053.548629264" Jan 31 17:03:28 crc kubenswrapper[4769]: I0131 17:03:28.708933 4769 scope.go:117] "RemoveContainer" containerID="7737b4ea15d66ecd3a1f0162917edc283af061057e4e073e1b0c4d285ababc8a" Jan 31 17:03:28 crc kubenswrapper[4769]: I0131 17:03:28.710864 4769 scope.go:117] "RemoveContainer" containerID="62946bc30fa0ff5c4e547df146dd6e5243ff6ef16c16250218e2568d4be097ae" Jan 31 17:03:28 crc kubenswrapper[4769]: I0131 17:03:28.711169 4769 scope.go:117] "RemoveContainer" containerID="3763e21c60b42d0d544a22b54f711243a20cd6d855d0b802a15c29ee20e6ebb4" Jan 31 17:03:28 crc kubenswrapper[4769]: I0131 17:03:28.711334 4769 scope.go:117] "RemoveContainer" containerID="38c57a6952923d6f8bb33f11c84f339256f80eb4733efa1a6c4093e5a7b785ad" Jan 31 17:03:28 crc kubenswrapper[4769]: E0131 17:03:28.711964 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:03:29 crc kubenswrapper[4769]: I0131 17:03:29.708839 4769 scope.go:117] "RemoveContainer" containerID="1ac6c3fca74fd84fdeae5ef2487d972493932dc0800386987645fa8647ffc868" Jan 31 17:03:29 crc kubenswrapper[4769]: I0131 17:03:29.708897 4769 scope.go:117] "RemoveContainer" containerID="3646ad1992c210e8bf551ccff3f4a82477556311e79cf13f4103fcc1edb9f1a4" Jan 31 17:03:29 crc kubenswrapper[4769]: E0131 17:03:29.709422 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:03:31 crc kubenswrapper[4769]: I0131 17:03:31.689330 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-vrjlv" Jan 31 17:03:31 crc kubenswrapper[4769]: I0131 17:03:31.689393 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-vrjlv" Jan 31 17:03:31 crc kubenswrapper[4769]: I0131 17:03:31.789724 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-vrjlv" Jan 31 17:03:32 crc kubenswrapper[4769]: I0131 17:03:32.551286 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-vrjlv" Jan 31 17:03:34 crc kubenswrapper[4769]: I0131 17:03:34.337011 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-vrjlv"] Jan 31 17:03:34 crc kubenswrapper[4769]: I0131 17:03:34.528986 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-vrjlv" podUID="a60394d0-ff2f-4f1b-b772-974bf5c1d627" containerName="registry-server" containerID="cri-o://9aa1f20d55e65f83903b62c8fd08a79165b05503c22406c6fc8538d6adb486bc" gracePeriod=2 Jan 31 17:03:34 crc kubenswrapper[4769]: I0131 17:03:34.970908 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-vrjlv" Jan 31 17:03:35 crc kubenswrapper[4769]: I0131 17:03:35.054296 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a60394d0-ff2f-4f1b-b772-974bf5c1d627-utilities\") pod \"a60394d0-ff2f-4f1b-b772-974bf5c1d627\" (UID: \"a60394d0-ff2f-4f1b-b772-974bf5c1d627\") " Jan 31 17:03:35 crc kubenswrapper[4769]: I0131 17:03:35.054480 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a60394d0-ff2f-4f1b-b772-974bf5c1d627-catalog-content\") pod \"a60394d0-ff2f-4f1b-b772-974bf5c1d627\" (UID: \"a60394d0-ff2f-4f1b-b772-974bf5c1d627\") " Jan 31 17:03:35 crc kubenswrapper[4769]: I0131 17:03:35.054561 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkcvb\" (UniqueName: \"kubernetes.io/projected/a60394d0-ff2f-4f1b-b772-974bf5c1d627-kube-api-access-zkcvb\") pod \"a60394d0-ff2f-4f1b-b772-974bf5c1d627\" (UID: \"a60394d0-ff2f-4f1b-b772-974bf5c1d627\") " Jan 31 17:03:35 crc kubenswrapper[4769]: I0131 17:03:35.056536 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a60394d0-ff2f-4f1b-b772-974bf5c1d627-utilities" (OuterVolumeSpecName: "utilities") pod "a60394d0-ff2f-4f1b-b772-974bf5c1d627" (UID: "a60394d0-ff2f-4f1b-b772-974bf5c1d627"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 17:03:35 crc kubenswrapper[4769]: I0131 17:03:35.065551 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a60394d0-ff2f-4f1b-b772-974bf5c1d627-kube-api-access-zkcvb" (OuterVolumeSpecName: "kube-api-access-zkcvb") pod "a60394d0-ff2f-4f1b-b772-974bf5c1d627" (UID: "a60394d0-ff2f-4f1b-b772-974bf5c1d627"). InnerVolumeSpecName "kube-api-access-zkcvb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 17:03:35 crc kubenswrapper[4769]: I0131 17:03:35.106113 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a60394d0-ff2f-4f1b-b772-974bf5c1d627-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a60394d0-ff2f-4f1b-b772-974bf5c1d627" (UID: "a60394d0-ff2f-4f1b-b772-974bf5c1d627"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 17:03:35 crc kubenswrapper[4769]: I0131 17:03:35.158307 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a60394d0-ff2f-4f1b-b772-974bf5c1d627-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 31 17:03:35 crc kubenswrapper[4769]: I0131 17:03:35.158346 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkcvb\" (UniqueName: \"kubernetes.io/projected/a60394d0-ff2f-4f1b-b772-974bf5c1d627-kube-api-access-zkcvb\") on node \"crc\" DevicePath \"\"" Jan 31 17:03:35 crc kubenswrapper[4769]: I0131 17:03:35.158360 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a60394d0-ff2f-4f1b-b772-974bf5c1d627-utilities\") on node \"crc\" DevicePath \"\"" Jan 31 17:03:35 crc kubenswrapper[4769]: I0131 17:03:35.557487 4769 generic.go:334] "Generic (PLEG): container finished" podID="a60394d0-ff2f-4f1b-b772-974bf5c1d627" containerID="9aa1f20d55e65f83903b62c8fd08a79165b05503c22406c6fc8538d6adb486bc" exitCode=0 Jan 31 17:03:35 crc kubenswrapper[4769]: I0131 17:03:35.557578 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vrjlv" Jan 31 17:03:35 crc kubenswrapper[4769]: I0131 17:03:35.557597 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vrjlv" event={"ID":"a60394d0-ff2f-4f1b-b772-974bf5c1d627","Type":"ContainerDied","Data":"9aa1f20d55e65f83903b62c8fd08a79165b05503c22406c6fc8538d6adb486bc"} Jan 31 17:03:35 crc kubenswrapper[4769]: I0131 17:03:35.558031 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vrjlv" event={"ID":"a60394d0-ff2f-4f1b-b772-974bf5c1d627","Type":"ContainerDied","Data":"bb8baf72e207d49fc36700bea5ac9d6bb5150b95dfebec35670648088e065879"} Jan 31 17:03:35 crc kubenswrapper[4769]: I0131 17:03:35.558049 4769 scope.go:117] "RemoveContainer" containerID="9aa1f20d55e65f83903b62c8fd08a79165b05503c22406c6fc8538d6adb486bc" Jan 31 17:03:35 crc kubenswrapper[4769]: I0131 17:03:35.574469 4769 scope.go:117] "RemoveContainer" containerID="c2599f8c804458f433ea8f3663e5f48779c255b3151a30513859b7dc90ebe3ba" Jan 31 17:03:35 crc kubenswrapper[4769]: I0131 17:03:35.590765 4769 scope.go:117] "RemoveContainer" containerID="ff180f1c2284be788c44ed29c2cb603592e080ac17c8f337c52ab66737b3e418" Jan 31 17:03:35 crc kubenswrapper[4769]: I0131 17:03:35.598651 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-vrjlv"] Jan 31 17:03:35 crc kubenswrapper[4769]: I0131 17:03:35.603240 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-vrjlv"] Jan 31 17:03:35 crc kubenswrapper[4769]: I0131 17:03:35.631783 4769 scope.go:117] "RemoveContainer" containerID="9aa1f20d55e65f83903b62c8fd08a79165b05503c22406c6fc8538d6adb486bc" Jan 31 17:03:35 crc kubenswrapper[4769]: E0131 17:03:35.632277 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9aa1f20d55e65f83903b62c8fd08a79165b05503c22406c6fc8538d6adb486bc\": container with ID starting with 9aa1f20d55e65f83903b62c8fd08a79165b05503c22406c6fc8538d6adb486bc not found: ID does not exist" containerID="9aa1f20d55e65f83903b62c8fd08a79165b05503c22406c6fc8538d6adb486bc" Jan 31 17:03:35 crc kubenswrapper[4769]: I0131 17:03:35.632317 
4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9aa1f20d55e65f83903b62c8fd08a79165b05503c22406c6fc8538d6adb486bc"} err="failed to get container status \"9aa1f20d55e65f83903b62c8fd08a79165b05503c22406c6fc8538d6adb486bc\": rpc error: code = NotFound desc = could not find container \"9aa1f20d55e65f83903b62c8fd08a79165b05503c22406c6fc8538d6adb486bc\": container with ID starting with 9aa1f20d55e65f83903b62c8fd08a79165b05503c22406c6fc8538d6adb486bc not found: ID does not exist" Jan 31 17:03:35 crc kubenswrapper[4769]: I0131 17:03:35.632344 4769 scope.go:117] "RemoveContainer" containerID="c2599f8c804458f433ea8f3663e5f48779c255b3151a30513859b7dc90ebe3ba" Jan 31 17:03:35 crc kubenswrapper[4769]: E0131 17:03:35.632634 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c2599f8c804458f433ea8f3663e5f48779c255b3151a30513859b7dc90ebe3ba\": container with ID starting with c2599f8c804458f433ea8f3663e5f48779c255b3151a30513859b7dc90ebe3ba not found: ID does not exist" containerID="c2599f8c804458f433ea8f3663e5f48779c255b3151a30513859b7dc90ebe3ba" Jan 31 17:03:35 crc kubenswrapper[4769]: I0131 17:03:35.632653 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c2599f8c804458f433ea8f3663e5f48779c255b3151a30513859b7dc90ebe3ba"} err="failed to get container status \"c2599f8c804458f433ea8f3663e5f48779c255b3151a30513859b7dc90ebe3ba\": rpc error: code = NotFound desc = could not find container \"c2599f8c804458f433ea8f3663e5f48779c255b3151a30513859b7dc90ebe3ba\": container with ID starting with c2599f8c804458f433ea8f3663e5f48779c255b3151a30513859b7dc90ebe3ba not found: ID does not exist" Jan 31 17:03:35 crc kubenswrapper[4769]: I0131 17:03:35.632665 4769 scope.go:117] "RemoveContainer" containerID="ff180f1c2284be788c44ed29c2cb603592e080ac17c8f337c52ab66737b3e418" Jan 31 17:03:35 crc kubenswrapper[4769]: E0131 17:03:35.632856 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ff180f1c2284be788c44ed29c2cb603592e080ac17c8f337c52ab66737b3e418\": container with ID starting with ff180f1c2284be788c44ed29c2cb603592e080ac17c8f337c52ab66737b3e418 not found: ID does not exist" containerID="ff180f1c2284be788c44ed29c2cb603592e080ac17c8f337c52ab66737b3e418" Jan 31 17:03:35 crc kubenswrapper[4769]: I0131 17:03:35.632875 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff180f1c2284be788c44ed29c2cb603592e080ac17c8f337c52ab66737b3e418"} err="failed to get container status \"ff180f1c2284be788c44ed29c2cb603592e080ac17c8f337c52ab66737b3e418\": rpc error: code = NotFound desc = could not find container \"ff180f1c2284be788c44ed29c2cb603592e080ac17c8f337c52ab66737b3e418\": container with ID starting with ff180f1c2284be788c44ed29c2cb603592e080ac17c8f337c52ab66737b3e418 not found: ID does not exist" Jan 31 17:03:36 crc kubenswrapper[4769]: I0131 17:03:36.718601 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a60394d0-ff2f-4f1b-b772-974bf5c1d627" path="/var/lib/kubelet/pods/a60394d0-ff2f-4f1b-b772-974bf5c1d627/volumes" Jan 31 17:03:38 crc kubenswrapper[4769]: I0131 17:03:38.355116 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-r5psx"] Jan 31 17:03:38 crc kubenswrapper[4769]: E0131 17:03:38.356225 4769 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="a60394d0-ff2f-4f1b-b772-974bf5c1d627" containerName="extract-utilities" Jan 31 17:03:38 crc kubenswrapper[4769]: I0131 17:03:38.356251 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="a60394d0-ff2f-4f1b-b772-974bf5c1d627" containerName="extract-utilities" Jan 31 17:03:38 crc kubenswrapper[4769]: E0131 17:03:38.356304 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a60394d0-ff2f-4f1b-b772-974bf5c1d627" containerName="extract-content" Jan 31 17:03:38 crc kubenswrapper[4769]: I0131 17:03:38.356319 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="a60394d0-ff2f-4f1b-b772-974bf5c1d627" containerName="extract-content" Jan 31 17:03:38 crc kubenswrapper[4769]: E0131 17:03:38.356370 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a60394d0-ff2f-4f1b-b772-974bf5c1d627" containerName="registry-server" Jan 31 17:03:38 crc kubenswrapper[4769]: I0131 17:03:38.356388 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="a60394d0-ff2f-4f1b-b772-974bf5c1d627" containerName="registry-server" Jan 31 17:03:38 crc kubenswrapper[4769]: I0131 17:03:38.356749 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="a60394d0-ff2f-4f1b-b772-974bf5c1d627" containerName="registry-server" Jan 31 17:03:38 crc kubenswrapper[4769]: I0131 17:03:38.358926 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-r5psx" Jan 31 17:03:38 crc kubenswrapper[4769]: I0131 17:03:38.370373 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-r5psx"] Jan 31 17:03:38 crc kubenswrapper[4769]: I0131 17:03:38.508805 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb8f5e2d-f2e0-44fe-b885-34144b94e667-catalog-content\") pod \"redhat-operators-r5psx\" (UID: \"fb8f5e2d-f2e0-44fe-b885-34144b94e667\") " pod="openshift-marketplace/redhat-operators-r5psx" Jan 31 17:03:38 crc kubenswrapper[4769]: I0131 17:03:38.508898 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb8f5e2d-f2e0-44fe-b885-34144b94e667-utilities\") pod \"redhat-operators-r5psx\" (UID: \"fb8f5e2d-f2e0-44fe-b885-34144b94e667\") " pod="openshift-marketplace/redhat-operators-r5psx" Jan 31 17:03:38 crc kubenswrapper[4769]: I0131 17:03:38.508942 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9fqbc\" (UniqueName: \"kubernetes.io/projected/fb8f5e2d-f2e0-44fe-b885-34144b94e667-kube-api-access-9fqbc\") pod \"redhat-operators-r5psx\" (UID: \"fb8f5e2d-f2e0-44fe-b885-34144b94e667\") " pod="openshift-marketplace/redhat-operators-r5psx" Jan 31 17:03:38 crc kubenswrapper[4769]: I0131 17:03:38.610756 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb8f5e2d-f2e0-44fe-b885-34144b94e667-utilities\") pod \"redhat-operators-r5psx\" (UID: \"fb8f5e2d-f2e0-44fe-b885-34144b94e667\") " pod="openshift-marketplace/redhat-operators-r5psx" Jan 31 17:03:38 crc kubenswrapper[4769]: I0131 17:03:38.610806 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9fqbc\" (UniqueName: \"kubernetes.io/projected/fb8f5e2d-f2e0-44fe-b885-34144b94e667-kube-api-access-9fqbc\") pod \"redhat-operators-r5psx\" (UID: 
\"fb8f5e2d-f2e0-44fe-b885-34144b94e667\") " pod="openshift-marketplace/redhat-operators-r5psx" Jan 31 17:03:38 crc kubenswrapper[4769]: I0131 17:03:38.610920 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb8f5e2d-f2e0-44fe-b885-34144b94e667-catalog-content\") pod \"redhat-operators-r5psx\" (UID: \"fb8f5e2d-f2e0-44fe-b885-34144b94e667\") " pod="openshift-marketplace/redhat-operators-r5psx" Jan 31 17:03:38 crc kubenswrapper[4769]: I0131 17:03:38.611290 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb8f5e2d-f2e0-44fe-b885-34144b94e667-utilities\") pod \"redhat-operators-r5psx\" (UID: \"fb8f5e2d-f2e0-44fe-b885-34144b94e667\") " pod="openshift-marketplace/redhat-operators-r5psx" Jan 31 17:03:38 crc kubenswrapper[4769]: I0131 17:03:38.611529 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb8f5e2d-f2e0-44fe-b885-34144b94e667-catalog-content\") pod \"redhat-operators-r5psx\" (UID: \"fb8f5e2d-f2e0-44fe-b885-34144b94e667\") " pod="openshift-marketplace/redhat-operators-r5psx" Jan 31 17:03:38 crc kubenswrapper[4769]: I0131 17:03:38.635747 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9fqbc\" (UniqueName: \"kubernetes.io/projected/fb8f5e2d-f2e0-44fe-b885-34144b94e667-kube-api-access-9fqbc\") pod \"redhat-operators-r5psx\" (UID: \"fb8f5e2d-f2e0-44fe-b885-34144b94e667\") " pod="openshift-marketplace/redhat-operators-r5psx" Jan 31 17:03:38 crc kubenswrapper[4769]: I0131 17:03:38.689055 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-r5psx" Jan 31 17:03:38 crc kubenswrapper[4769]: I0131 17:03:38.982783 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-r5psx"] Jan 31 17:03:38 crc kubenswrapper[4769]: W0131 17:03:38.990243 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfb8f5e2d_f2e0_44fe_b885_34144b94e667.slice/crio-61fae8792da234701785de6cdb6633c62942165b5e4868308d6ac5657eb3b694 WatchSource:0}: Error finding container 61fae8792da234701785de6cdb6633c62942165b5e4868308d6ac5657eb3b694: Status 404 returned error can't find the container with id 61fae8792da234701785de6cdb6633c62942165b5e4868308d6ac5657eb3b694 Jan 31 17:03:39 crc kubenswrapper[4769]: I0131 17:03:39.589793 4769 generic.go:334] "Generic (PLEG): container finished" podID="fb8f5e2d-f2e0-44fe-b885-34144b94e667" containerID="d21f161ee49699cf6d88f554d921ffb0a17b36f5e7fb6bc7b82fbb3c661d9fa8" exitCode=0 Jan 31 17:03:39 crc kubenswrapper[4769]: I0131 17:03:39.590023 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r5psx" event={"ID":"fb8f5e2d-f2e0-44fe-b885-34144b94e667","Type":"ContainerDied","Data":"d21f161ee49699cf6d88f554d921ffb0a17b36f5e7fb6bc7b82fbb3c661d9fa8"} Jan 31 17:03:39 crc kubenswrapper[4769]: I0131 17:03:39.590773 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r5psx" event={"ID":"fb8f5e2d-f2e0-44fe-b885-34144b94e667","Type":"ContainerStarted","Data":"61fae8792da234701785de6cdb6633c62942165b5e4868308d6ac5657eb3b694"} Jan 31 17:03:40 crc kubenswrapper[4769]: I0131 17:03:40.599259 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-r5psx" event={"ID":"fb8f5e2d-f2e0-44fe-b885-34144b94e667","Type":"ContainerStarted","Data":"ab3e46964d289c6ae545f187ef635813f81c07319c44d51ad6191c861a92d087"} Jan 31 17:03:41 crc kubenswrapper[4769]: I0131 17:03:41.608633 4769 generic.go:334] "Generic (PLEG): container finished" podID="fb8f5e2d-f2e0-44fe-b885-34144b94e667" containerID="ab3e46964d289c6ae545f187ef635813f81c07319c44d51ad6191c861a92d087" exitCode=0 Jan 31 17:03:41 crc kubenswrapper[4769]: I0131 17:03:41.608736 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r5psx" event={"ID":"fb8f5e2d-f2e0-44fe-b885-34144b94e667","Type":"ContainerDied","Data":"ab3e46964d289c6ae545f187ef635813f81c07319c44d51ad6191c861a92d087"} Jan 31 17:03:42 crc kubenswrapper[4769]: I0131 17:03:42.618573 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r5psx" event={"ID":"fb8f5e2d-f2e0-44fe-b885-34144b94e667","Type":"ContainerStarted","Data":"0ac275e4e43e260eb8fbd43a7936dd3c6a61b12c495bf82651970ffafb51e947"} Jan 31 17:03:42 crc kubenswrapper[4769]: I0131 17:03:42.641805 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-r5psx" podStartSLOduration=2.125126044 podStartE2EDuration="4.641787222s" podCreationTimestamp="2026-01-31 17:03:38 +0000 UTC" firstStartedPulling="2026-01-31 17:03:39.592000375 +0000 UTC m=+2067.666169044" lastFinishedPulling="2026-01-31 17:03:42.108661553 +0000 UTC m=+2070.182830222" observedRunningTime="2026-01-31 17:03:42.641154575 +0000 UTC m=+2070.715323264" watchObservedRunningTime="2026-01-31 17:03:42.641787222 +0000 UTC m=+2070.715955891" Jan 31 17:03:43 crc kubenswrapper[4769]: I0131 17:03:43.708920 4769 scope.go:117] "RemoveContainer" containerID="7737b4ea15d66ecd3a1f0162917edc283af061057e4e073e1b0c4d285ababc8a" Jan 31 17:03:43 crc kubenswrapper[4769]: I0131 17:03:43.709000 4769 scope.go:117] "RemoveContainer" containerID="62946bc30fa0ff5c4e547df146dd6e5243ff6ef16c16250218e2568d4be097ae" Jan 31 17:03:43 crc kubenswrapper[4769]: I0131 17:03:43.709086 4769 scope.go:117] "RemoveContainer" containerID="3763e21c60b42d0d544a22b54f711243a20cd6d855d0b802a15c29ee20e6ebb4" Jan 31 17:03:43 crc kubenswrapper[4769]: I0131 17:03:43.709119 4769 scope.go:117] "RemoveContainer" containerID="38c57a6952923d6f8bb33f11c84f339256f80eb4733efa1a6c4093e5a7b785ad" Jan 31 17:03:43 crc kubenswrapper[4769]: E0131 17:03:43.709399 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:03:44 
crc kubenswrapper[4769]: I0131 17:03:44.709660 4769 scope.go:117] "RemoveContainer" containerID="1ac6c3fca74fd84fdeae5ef2487d972493932dc0800386987645fa8647ffc868" Jan 31 17:03:44 crc kubenswrapper[4769]: I0131 17:03:44.710039 4769 scope.go:117] "RemoveContainer" containerID="3646ad1992c210e8bf551ccff3f4a82477556311e79cf13f4103fcc1edb9f1a4" Jan 31 17:03:44 crc kubenswrapper[4769]: E0131 17:03:44.710247 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:03:48 crc kubenswrapper[4769]: I0131 17:03:48.689838 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-r5psx" Jan 31 17:03:48 crc kubenswrapper[4769]: I0131 17:03:48.690157 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-r5psx" Jan 31 17:03:48 crc kubenswrapper[4769]: I0131 17:03:48.746292 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-r5psx" Jan 31 17:03:49 crc kubenswrapper[4769]: I0131 17:03:49.742854 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-r5psx" Jan 31 17:03:49 crc kubenswrapper[4769]: I0131 17:03:49.808647 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-r5psx"] Jan 31 17:03:50 crc kubenswrapper[4769]: I0131 17:03:50.682671 4769 patch_prober.go:28] interesting pod/machine-config-daemon-4bqbm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 31 17:03:50 crc kubenswrapper[4769]: I0131 17:03:50.682756 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 31 17:03:51 crc kubenswrapper[4769]: I0131 17:03:51.686268 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-r5psx" podUID="fb8f5e2d-f2e0-44fe-b885-34144b94e667" containerName="registry-server" containerID="cri-o://0ac275e4e43e260eb8fbd43a7936dd3c6a61b12c495bf82651970ffafb51e947" gracePeriod=2 Jan 31 17:03:52 crc kubenswrapper[4769]: I0131 17:03:52.696653 4769 generic.go:334] "Generic (PLEG): container finished" podID="fb8f5e2d-f2e0-44fe-b885-34144b94e667" containerID="0ac275e4e43e260eb8fbd43a7936dd3c6a61b12c495bf82651970ffafb51e947" exitCode=0 Jan 31 17:03:52 crc kubenswrapper[4769]: I0131 17:03:52.696834 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r5psx" 
event={"ID":"fb8f5e2d-f2e0-44fe-b885-34144b94e667","Type":"ContainerDied","Data":"0ac275e4e43e260eb8fbd43a7936dd3c6a61b12c495bf82651970ffafb51e947"} Jan 31 17:03:52 crc kubenswrapper[4769]: I0131 17:03:52.697078 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r5psx" event={"ID":"fb8f5e2d-f2e0-44fe-b885-34144b94e667","Type":"ContainerDied","Data":"61fae8792da234701785de6cdb6633c62942165b5e4868308d6ac5657eb3b694"} Jan 31 17:03:52 crc kubenswrapper[4769]: I0131 17:03:52.697096 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="61fae8792da234701785de6cdb6633c62942165b5e4868308d6ac5657eb3b694" Jan 31 17:03:52 crc kubenswrapper[4769]: I0131 17:03:52.711318 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-r5psx" Jan 31 17:03:52 crc kubenswrapper[4769]: I0131 17:03:52.853132 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9fqbc\" (UniqueName: \"kubernetes.io/projected/fb8f5e2d-f2e0-44fe-b885-34144b94e667-kube-api-access-9fqbc\") pod \"fb8f5e2d-f2e0-44fe-b885-34144b94e667\" (UID: \"fb8f5e2d-f2e0-44fe-b885-34144b94e667\") " Jan 31 17:03:52 crc kubenswrapper[4769]: I0131 17:03:52.853230 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb8f5e2d-f2e0-44fe-b885-34144b94e667-catalog-content\") pod \"fb8f5e2d-f2e0-44fe-b885-34144b94e667\" (UID: \"fb8f5e2d-f2e0-44fe-b885-34144b94e667\") " Jan 31 17:03:52 crc kubenswrapper[4769]: I0131 17:03:52.853279 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb8f5e2d-f2e0-44fe-b885-34144b94e667-utilities\") pod \"fb8f5e2d-f2e0-44fe-b885-34144b94e667\" (UID: \"fb8f5e2d-f2e0-44fe-b885-34144b94e667\") " Jan 31 17:03:52 crc kubenswrapper[4769]: I0131 17:03:52.854313 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fb8f5e2d-f2e0-44fe-b885-34144b94e667-utilities" (OuterVolumeSpecName: "utilities") pod "fb8f5e2d-f2e0-44fe-b885-34144b94e667" (UID: "fb8f5e2d-f2e0-44fe-b885-34144b94e667"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 17:03:52 crc kubenswrapper[4769]: I0131 17:03:52.858201 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fb8f5e2d-f2e0-44fe-b885-34144b94e667-kube-api-access-9fqbc" (OuterVolumeSpecName: "kube-api-access-9fqbc") pod "fb8f5e2d-f2e0-44fe-b885-34144b94e667" (UID: "fb8f5e2d-f2e0-44fe-b885-34144b94e667"). InnerVolumeSpecName "kube-api-access-9fqbc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 17:03:52 crc kubenswrapper[4769]: I0131 17:03:52.955707 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb8f5e2d-f2e0-44fe-b885-34144b94e667-utilities\") on node \"crc\" DevicePath \"\"" Jan 31 17:03:52 crc kubenswrapper[4769]: I0131 17:03:52.955759 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9fqbc\" (UniqueName: \"kubernetes.io/projected/fb8f5e2d-f2e0-44fe-b885-34144b94e667-kube-api-access-9fqbc\") on node \"crc\" DevicePath \"\"" Jan 31 17:03:52 crc kubenswrapper[4769]: I0131 17:03:52.991068 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fb8f5e2d-f2e0-44fe-b885-34144b94e667-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fb8f5e2d-f2e0-44fe-b885-34144b94e667" (UID: "fb8f5e2d-f2e0-44fe-b885-34144b94e667"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 17:03:53 crc kubenswrapper[4769]: I0131 17:03:53.057331 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb8f5e2d-f2e0-44fe-b885-34144b94e667-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 31 17:03:53 crc kubenswrapper[4769]: I0131 17:03:53.705081 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-r5psx" Jan 31 17:03:53 crc kubenswrapper[4769]: I0131 17:03:53.770645 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-r5psx"] Jan 31 17:03:53 crc kubenswrapper[4769]: I0131 17:03:53.785623 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-r5psx"] Jan 31 17:03:54 crc kubenswrapper[4769]: I0131 17:03:54.709907 4769 scope.go:117] "RemoveContainer" containerID="7737b4ea15d66ecd3a1f0162917edc283af061057e4e073e1b0c4d285ababc8a" Jan 31 17:03:54 crc kubenswrapper[4769]: I0131 17:03:54.710096 4769 scope.go:117] "RemoveContainer" containerID="62946bc30fa0ff5c4e547df146dd6e5243ff6ef16c16250218e2568d4be097ae" Jan 31 17:03:54 crc kubenswrapper[4769]: I0131 17:03:54.710369 4769 scope.go:117] "RemoveContainer" containerID="3763e21c60b42d0d544a22b54f711243a20cd6d855d0b802a15c29ee20e6ebb4" Jan 31 17:03:54 crc kubenswrapper[4769]: I0131 17:03:54.710474 4769 scope.go:117] "RemoveContainer" containerID="38c57a6952923d6f8bb33f11c84f339256f80eb4733efa1a6c4093e5a7b785ad" Jan 31 17:03:54 crc kubenswrapper[4769]: E0131 17:03:54.711552 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder 
pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:03:54 crc kubenswrapper[4769]: I0131 17:03:54.725328 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fb8f5e2d-f2e0-44fe-b885-34144b94e667" path="/var/lib/kubelet/pods/fb8f5e2d-f2e0-44fe-b885-34144b94e667/volumes" Jan 31 17:03:55 crc kubenswrapper[4769]: I0131 17:03:55.709119 4769 scope.go:117] "RemoveContainer" containerID="1ac6c3fca74fd84fdeae5ef2487d972493932dc0800386987645fa8647ffc868" Jan 31 17:03:55 crc kubenswrapper[4769]: I0131 17:03:55.709625 4769 scope.go:117] "RemoveContainer" containerID="3646ad1992c210e8bf551ccff3f4a82477556311e79cf13f4103fcc1edb9f1a4" Jan 31 17:03:55 crc kubenswrapper[4769]: E0131 17:03:55.710129 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:04:06 crc kubenswrapper[4769]: I0131 17:04:06.708317 4769 scope.go:117] "RemoveContainer" containerID="1ac6c3fca74fd84fdeae5ef2487d972493932dc0800386987645fa8647ffc868" Jan 31 17:04:06 crc kubenswrapper[4769]: I0131 17:04:06.708941 4769 scope.go:117] "RemoveContainer" containerID="3646ad1992c210e8bf551ccff3f4a82477556311e79cf13f4103fcc1edb9f1a4" Jan 31 17:04:06 crc kubenswrapper[4769]: E0131 17:04:06.709483 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:04:07 crc kubenswrapper[4769]: I0131 17:04:07.708836 4769 scope.go:117] "RemoveContainer" containerID="7737b4ea15d66ecd3a1f0162917edc283af061057e4e073e1b0c4d285ababc8a" Jan 31 17:04:07 crc kubenswrapper[4769]: I0131 17:04:07.708963 4769 scope.go:117] "RemoveContainer" containerID="62946bc30fa0ff5c4e547df146dd6e5243ff6ef16c16250218e2568d4be097ae" Jan 31 17:04:07 crc kubenswrapper[4769]: I0131 17:04:07.709139 4769 scope.go:117] "RemoveContainer" containerID="3763e21c60b42d0d544a22b54f711243a20cd6d855d0b802a15c29ee20e6ebb4" Jan 31 17:04:07 crc kubenswrapper[4769]: I0131 17:04:07.709206 4769 scope.go:117] "RemoveContainer" containerID="38c57a6952923d6f8bb33f11c84f339256f80eb4733efa1a6c4093e5a7b785ad" Jan 31 17:04:07 crc kubenswrapper[4769]: E0131 17:04:07.709712 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to 
\"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:04:08 crc kubenswrapper[4769]: I0131 17:04:08.867105 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="855e4b559e0049ffffed208d1b0d85aa658628233a0b4672bb3a638bb95f6768" exitCode=1 Jan 31 17:04:08 crc kubenswrapper[4769]: I0131 17:04:08.867168 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"855e4b559e0049ffffed208d1b0d85aa658628233a0b4672bb3a638bb95f6768"} Jan 31 17:04:08 crc kubenswrapper[4769]: I0131 17:04:08.867217 4769 scope.go:117] "RemoveContainer" containerID="a0b2ba6d51ec40d5d594dcf3fccf60ddfb8791afe72e3b8c322cf69d789ddbfd" Jan 31 17:04:08 crc kubenswrapper[4769]: I0131 17:04:08.868208 4769 scope.go:117] "RemoveContainer" containerID="7737b4ea15d66ecd3a1f0162917edc283af061057e4e073e1b0c4d285ababc8a" Jan 31 17:04:08 crc kubenswrapper[4769]: I0131 17:04:08.868320 4769 scope.go:117] "RemoveContainer" containerID="62946bc30fa0ff5c4e547df146dd6e5243ff6ef16c16250218e2568d4be097ae" Jan 31 17:04:08 crc kubenswrapper[4769]: I0131 17:04:08.868363 4769 scope.go:117] "RemoveContainer" containerID="855e4b559e0049ffffed208d1b0d85aa658628233a0b4672bb3a638bb95f6768" Jan 31 17:04:08 crc kubenswrapper[4769]: I0131 17:04:08.868540 4769 scope.go:117] "RemoveContainer" containerID="3763e21c60b42d0d544a22b54f711243a20cd6d855d0b802a15c29ee20e6ebb4" Jan 31 17:04:08 crc kubenswrapper[4769]: I0131 17:04:08.868607 4769 scope.go:117] "RemoveContainer" containerID="38c57a6952923d6f8bb33f11c84f339256f80eb4733efa1a6c4093e5a7b785ad" Jan 31 17:04:08 crc kubenswrapper[4769]: E0131 17:04:08.869116 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 10s restarting failed container=container-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder 
pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:04:20 crc kubenswrapper[4769]: I0131 17:04:20.681867 4769 patch_prober.go:28] interesting pod/machine-config-daemon-4bqbm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 31 17:04:20 crc kubenswrapper[4769]: I0131 17:04:20.682372 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 31 17:04:20 crc kubenswrapper[4769]: I0131 17:04:20.682415 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" Jan 31 17:04:20 crc kubenswrapper[4769]: I0131 17:04:20.683002 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a1b936b54fa524976a558efe701fe594aef4952a8c4551ef3c20c1b46797e85b"} pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 31 17:04:20 crc kubenswrapper[4769]: I0131 17:04:20.683056 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" containerID="cri-o://a1b936b54fa524976a558efe701fe594aef4952a8c4551ef3c20c1b46797e85b" gracePeriod=600 Jan 31 17:04:20 crc kubenswrapper[4769]: I0131 17:04:20.996302 4769 generic.go:334] "Generic (PLEG): container finished" podID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerID="a1b936b54fa524976a558efe701fe594aef4952a8c4551ef3c20c1b46797e85b" exitCode=0 Jan 31 17:04:20 crc kubenswrapper[4769]: I0131 17:04:20.996381 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" event={"ID":"1d352f75-43f7-4b8c-867e-cfb17bbbe011","Type":"ContainerDied","Data":"a1b936b54fa524976a558efe701fe594aef4952a8c4551ef3c20c1b46797e85b"} Jan 31 17:04:20 crc kubenswrapper[4769]: I0131 17:04:20.996746 4769 scope.go:117] "RemoveContainer" containerID="5953b98da67a838c445c5de169d2507557e859d1573f7aa06c047f960972389b" Jan 31 17:04:21 crc kubenswrapper[4769]: I0131 17:04:21.708339 4769 scope.go:117] "RemoveContainer" containerID="1ac6c3fca74fd84fdeae5ef2487d972493932dc0800386987645fa8647ffc868" Jan 31 17:04:21 crc kubenswrapper[4769]: I0131 17:04:21.708881 4769 scope.go:117] "RemoveContainer" containerID="3646ad1992c210e8bf551ccff3f4a82477556311e79cf13f4103fcc1edb9f1a4" Jan 31 17:04:21 crc kubenswrapper[4769]: E0131 17:04:21.709237 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server 
pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:04:22 crc kubenswrapper[4769]: I0131 17:04:22.008962 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" event={"ID":"1d352f75-43f7-4b8c-867e-cfb17bbbe011","Type":"ContainerStarted","Data":"fb744d21157f411015c8cd1651de19adb19d19b1ce3580e5574de4b8b82236f1"} Jan 31 17:04:23 crc kubenswrapper[4769]: I0131 17:04:23.708403 4769 scope.go:117] "RemoveContainer" containerID="7737b4ea15d66ecd3a1f0162917edc283af061057e4e073e1b0c4d285ababc8a" Jan 31 17:04:23 crc kubenswrapper[4769]: I0131 17:04:23.708932 4769 scope.go:117] "RemoveContainer" containerID="62946bc30fa0ff5c4e547df146dd6e5243ff6ef16c16250218e2568d4be097ae" Jan 31 17:04:23 crc kubenswrapper[4769]: I0131 17:04:23.708994 4769 scope.go:117] "RemoveContainer" containerID="855e4b559e0049ffffed208d1b0d85aa658628233a0b4672bb3a638bb95f6768" Jan 31 17:04:23 crc kubenswrapper[4769]: I0131 17:04:23.709149 4769 scope.go:117] "RemoveContainer" containerID="3763e21c60b42d0d544a22b54f711243a20cd6d855d0b802a15c29ee20e6ebb4" Jan 31 17:04:23 crc kubenswrapper[4769]: I0131 17:04:23.709217 4769 scope.go:117] "RemoveContainer" containerID="38c57a6952923d6f8bb33f11c84f339256f80eb4733efa1a6c4093e5a7b785ad" Jan 31 17:04:23 crc kubenswrapper[4769]: E0131 17:04:23.902154 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:04:24 crc kubenswrapper[4769]: I0131 17:04:24.030457 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerStarted","Data":"e8cf1205aa6ab18a9ce5183ca3000df44bf01a5ba2f0fab8a4ae50c0f9e84c86"} Jan 31 17:04:24 crc kubenswrapper[4769]: I0131 17:04:24.031210 4769 scope.go:117] "RemoveContainer" containerID="7737b4ea15d66ecd3a1f0162917edc283af061057e4e073e1b0c4d285ababc8a" Jan 31 17:04:24 crc kubenswrapper[4769]: I0131 17:04:24.031276 4769 scope.go:117] "RemoveContainer" containerID="62946bc30fa0ff5c4e547df146dd6e5243ff6ef16c16250218e2568d4be097ae" Jan 31 17:04:24 crc kubenswrapper[4769]: I0131 17:04:24.031360 4769 scope.go:117] "RemoveContainer" containerID="3763e21c60b42d0d544a22b54f711243a20cd6d855d0b802a15c29ee20e6ebb4" Jan 31 17:04:24 crc kubenswrapper[4769]: I0131 17:04:24.031395 4769 scope.go:117] "RemoveContainer" containerID="38c57a6952923d6f8bb33f11c84f339256f80eb4733efa1a6c4093e5a7b785ad" Jan 31 
17:04:24 crc kubenswrapper[4769]: E0131 17:04:24.031655 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:04:30 crc kubenswrapper[4769]: I0131 17:04:30.083966 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="84c2f4c49c1c9b12fa8714fd5a724335cb0fbeea29abb91954d762942d2a821b" exitCode=1 Jan 31 17:04:30 crc kubenswrapper[4769]: I0131 17:04:30.084004 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"84c2f4c49c1c9b12fa8714fd5a724335cb0fbeea29abb91954d762942d2a821b"} Jan 31 17:04:30 crc kubenswrapper[4769]: I0131 17:04:30.084817 4769 scope.go:117] "RemoveContainer" containerID="b2ba4adff2ae1ce1a2aae1094c4aaa7d2e260cea91f7d8580dd53fb0f834f1bc" Jan 31 17:04:30 crc kubenswrapper[4769]: I0131 17:04:30.085828 4769 scope.go:117] "RemoveContainer" containerID="7737b4ea15d66ecd3a1f0162917edc283af061057e4e073e1b0c4d285ababc8a" Jan 31 17:04:30 crc kubenswrapper[4769]: I0131 17:04:30.085946 4769 scope.go:117] "RemoveContainer" containerID="62946bc30fa0ff5c4e547df146dd6e5243ff6ef16c16250218e2568d4be097ae" Jan 31 17:04:30 crc kubenswrapper[4769]: I0131 17:04:30.086108 4769 scope.go:117] "RemoveContainer" containerID="84c2f4c49c1c9b12fa8714fd5a724335cb0fbeea29abb91954d762942d2a821b" Jan 31 17:04:30 crc kubenswrapper[4769]: I0131 17:04:30.086142 4769 scope.go:117] "RemoveContainer" containerID="3763e21c60b42d0d544a22b54f711243a20cd6d855d0b802a15c29ee20e6ebb4" Jan 31 17:04:30 crc kubenswrapper[4769]: I0131 17:04:30.086208 4769 scope.go:117] "RemoveContainer" containerID="38c57a6952923d6f8bb33f11c84f339256f80eb4733efa1a6c4093e5a7b785ad" Jan 31 17:04:30 crc kubenswrapper[4769]: E0131 17:04:30.090772 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 10s restarting failed container=object-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" 
with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:04:32 crc kubenswrapper[4769]: I0131 17:04:32.712687 4769 scope.go:117] "RemoveContainer" containerID="1ac6c3fca74fd84fdeae5ef2487d972493932dc0800386987645fa8647ffc868" Jan 31 17:04:32 crc kubenswrapper[4769]: I0131 17:04:32.713282 4769 scope.go:117] "RemoveContainer" containerID="3646ad1992c210e8bf551ccff3f4a82477556311e79cf13f4103fcc1edb9f1a4" Jan 31 17:04:32 crc kubenswrapper[4769]: E0131 17:04:32.905427 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:04:33 crc kubenswrapper[4769]: I0131 17:04:33.116879 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerStarted","Data":"9b846fd8e8f3ee8a462a2ac54084f2e7812f1cce7ef55cc7ce40070387ba3b01"} Jan 31 17:04:33 crc kubenswrapper[4769]: I0131 17:04:33.117099 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 17:04:33 crc kubenswrapper[4769]: I0131 17:04:33.117607 4769 scope.go:117] "RemoveContainer" containerID="3646ad1992c210e8bf551ccff3f4a82477556311e79cf13f4103fcc1edb9f1a4" Jan 31 17:04:33 crc kubenswrapper[4769]: E0131 17:04:33.117947 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:04:34 crc kubenswrapper[4769]: I0131 17:04:34.124361 4769 scope.go:117] "RemoveContainer" containerID="3646ad1992c210e8bf551ccff3f4a82477556311e79cf13f4103fcc1edb9f1a4" Jan 31 17:04:34 crc kubenswrapper[4769]: E0131 17:04:34.124855 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:04:38 crc kubenswrapper[4769]: I0131 17:04:38.649966 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 17:04:41 crc kubenswrapper[4769]: I0131 17:04:41.647909 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" 
podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 17:04:41 crc kubenswrapper[4769]: I0131 17:04:41.648054 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 17:04:42 crc kubenswrapper[4769]: I0131 17:04:42.712890 4769 scope.go:117] "RemoveContainer" containerID="7737b4ea15d66ecd3a1f0162917edc283af061057e4e073e1b0c4d285ababc8a" Jan 31 17:04:42 crc kubenswrapper[4769]: I0131 17:04:42.712959 4769 scope.go:117] "RemoveContainer" containerID="62946bc30fa0ff5c4e547df146dd6e5243ff6ef16c16250218e2568d4be097ae" Jan 31 17:04:42 crc kubenswrapper[4769]: I0131 17:04:42.713030 4769 scope.go:117] "RemoveContainer" containerID="84c2f4c49c1c9b12fa8714fd5a724335cb0fbeea29abb91954d762942d2a821b" Jan 31 17:04:42 crc kubenswrapper[4769]: I0131 17:04:42.713038 4769 scope.go:117] "RemoveContainer" containerID="3763e21c60b42d0d544a22b54f711243a20cd6d855d0b802a15c29ee20e6ebb4" Jan 31 17:04:42 crc kubenswrapper[4769]: I0131 17:04:42.713069 4769 scope.go:117] "RemoveContainer" containerID="38c57a6952923d6f8bb33f11c84f339256f80eb4733efa1a6c4093e5a7b785ad" Jan 31 17:04:42 crc kubenswrapper[4769]: E0131 17:04:42.858209 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:04:43 crc kubenswrapper[4769]: I0131 17:04:43.197250 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerStarted","Data":"2b8f74cf7a78134f2f82659b1526844f5e7f23919cdce0a11d91c8d87551dbc7"} Jan 31 17:04:43 crc kubenswrapper[4769]: I0131 17:04:43.197912 4769 scope.go:117] "RemoveContainer" containerID="7737b4ea15d66ecd3a1f0162917edc283af061057e4e073e1b0c4d285ababc8a" Jan 31 17:04:43 crc kubenswrapper[4769]: I0131 17:04:43.197975 4769 scope.go:117] "RemoveContainer" containerID="62946bc30fa0ff5c4e547df146dd6e5243ff6ef16c16250218e2568d4be097ae" Jan 31 17:04:43 crc kubenswrapper[4769]: I0131 17:04:43.198059 4769 scope.go:117] "RemoveContainer" containerID="3763e21c60b42d0d544a22b54f711243a20cd6d855d0b802a15c29ee20e6ebb4" Jan 31 17:04:43 crc kubenswrapper[4769]: I0131 17:04:43.198091 4769 scope.go:117] "RemoveContainer" containerID="38c57a6952923d6f8bb33f11c84f339256f80eb4733efa1a6c4093e5a7b785ad" Jan 31 17:04:43 crc kubenswrapper[4769]: E0131 17:04:43.198330 4769 
pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:04:44 crc kubenswrapper[4769]: I0131 17:04:44.647658 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 17:04:44 crc kubenswrapper[4769]: I0131 17:04:44.648108 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 17:04:44 crc kubenswrapper[4769]: I0131 17:04:44.649153 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="proxy-httpd" containerStatusID={"Type":"cri-o","ID":"9b846fd8e8f3ee8a462a2ac54084f2e7812f1cce7ef55cc7ce40070387ba3b01"} pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" containerMessage="Container proxy-httpd failed liveness probe, will be restarted" Jan 31 17:04:44 crc kubenswrapper[4769]: I0131 17:04:44.649190 4769 scope.go:117] "RemoveContainer" containerID="3646ad1992c210e8bf551ccff3f4a82477556311e79cf13f4103fcc1edb9f1a4" Jan 31 17:04:44 crc kubenswrapper[4769]: I0131 17:04:44.649241 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" containerID="cri-o://9b846fd8e8f3ee8a462a2ac54084f2e7812f1cce7ef55cc7ce40070387ba3b01" gracePeriod=30 Jan 31 17:04:44 crc kubenswrapper[4769]: I0131 17:04:44.650525 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 17:04:44 crc kubenswrapper[4769]: E0131 17:04:44.930350 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:04:45 crc kubenswrapper[4769]: I0131 17:04:45.217392 4769 generic.go:334] "Generic (PLEG): container finished" podID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerID="9b846fd8e8f3ee8a462a2ac54084f2e7812f1cce7ef55cc7ce40070387ba3b01" exitCode=0 Jan 31 17:04:45 crc kubenswrapper[4769]: I0131 
17:04:45.217432 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerDied","Data":"9b846fd8e8f3ee8a462a2ac54084f2e7812f1cce7ef55cc7ce40070387ba3b01"} Jan 31 17:04:45 crc kubenswrapper[4769]: I0131 17:04:45.217459 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerStarted","Data":"0fa55dcb76bc686943e5b7135025fa4b96b4f5ad436cce5f2cb11ea5e727ea37"} Jan 31 17:04:45 crc kubenswrapper[4769]: I0131 17:04:45.217474 4769 scope.go:117] "RemoveContainer" containerID="1ac6c3fca74fd84fdeae5ef2487d972493932dc0800386987645fa8647ffc868" Jan 31 17:04:45 crc kubenswrapper[4769]: I0131 17:04:45.217779 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 17:04:45 crc kubenswrapper[4769]: I0131 17:04:45.218276 4769 scope.go:117] "RemoveContainer" containerID="3646ad1992c210e8bf551ccff3f4a82477556311e79cf13f4103fcc1edb9f1a4" Jan 31 17:04:45 crc kubenswrapper[4769]: E0131 17:04:45.218704 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:04:46 crc kubenswrapper[4769]: I0131 17:04:46.228628 4769 scope.go:117] "RemoveContainer" containerID="3646ad1992c210e8bf551ccff3f4a82477556311e79cf13f4103fcc1edb9f1a4" Jan 31 17:04:46 crc kubenswrapper[4769]: E0131 17:04:46.229171 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:04:50 crc kubenswrapper[4769]: I0131 17:04:50.650401 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 17:04:51 crc kubenswrapper[4769]: I0131 17:04:51.648403 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 17:04:53 crc kubenswrapper[4769]: I0131 17:04:53.647767 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 17:04:55 crc kubenswrapper[4769]: I0131 17:04:55.708896 4769 scope.go:117] "RemoveContainer" containerID="7737b4ea15d66ecd3a1f0162917edc283af061057e4e073e1b0c4d285ababc8a" Jan 31 17:04:55 crc kubenswrapper[4769]: I0131 17:04:55.709289 4769 scope.go:117] "RemoveContainer" containerID="62946bc30fa0ff5c4e547df146dd6e5243ff6ef16c16250218e2568d4be097ae" Jan 31 17:04:55 crc 
kubenswrapper[4769]: I0131 17:04:55.709477 4769 scope.go:117] "RemoveContainer" containerID="3763e21c60b42d0d544a22b54f711243a20cd6d855d0b802a15c29ee20e6ebb4" Jan 31 17:04:55 crc kubenswrapper[4769]: I0131 17:04:55.709576 4769 scope.go:117] "RemoveContainer" containerID="38c57a6952923d6f8bb33f11c84f339256f80eb4733efa1a6c4093e5a7b785ad" Jan 31 17:04:55 crc kubenswrapper[4769]: E0131 17:04:55.710175 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:04:56 crc kubenswrapper[4769]: I0131 17:04:56.647224 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 17:04:56 crc kubenswrapper[4769]: I0131 17:04:56.647276 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 17:04:56 crc kubenswrapper[4769]: I0131 17:04:56.648041 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 17:04:56 crc kubenswrapper[4769]: I0131 17:04:56.648817 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="proxy-httpd" containerStatusID={"Type":"cri-o","ID":"0fa55dcb76bc686943e5b7135025fa4b96b4f5ad436cce5f2cb11ea5e727ea37"} pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" containerMessage="Container proxy-httpd failed liveness probe, will be restarted" Jan 31 17:04:56 crc kubenswrapper[4769]: I0131 17:04:56.648940 4769 scope.go:117] "RemoveContainer" containerID="3646ad1992c210e8bf551ccff3f4a82477556311e79cf13f4103fcc1edb9f1a4" Jan 31 17:04:56 crc kubenswrapper[4769]: I0131 17:04:56.649050 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" containerID="cri-o://0fa55dcb76bc686943e5b7135025fa4b96b4f5ad436cce5f2cb11ea5e727ea37" gracePeriod=30 Jan 31 17:04:56 crc kubenswrapper[4769]: I0131 17:04:56.650183 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 17:04:56 crc 
kubenswrapper[4769]: E0131 17:04:56.769366 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:04:57 crc kubenswrapper[4769]: I0131 17:04:57.321439 4769 generic.go:334] "Generic (PLEG): container finished" podID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerID="0fa55dcb76bc686943e5b7135025fa4b96b4f5ad436cce5f2cb11ea5e727ea37" exitCode=0 Jan 31 17:04:57 crc kubenswrapper[4769]: I0131 17:04:57.321537 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerDied","Data":"0fa55dcb76bc686943e5b7135025fa4b96b4f5ad436cce5f2cb11ea5e727ea37"} Jan 31 17:04:57 crc kubenswrapper[4769]: I0131 17:04:57.321960 4769 scope.go:117] "RemoveContainer" containerID="9b846fd8e8f3ee8a462a2ac54084f2e7812f1cce7ef55cc7ce40070387ba3b01" Jan 31 17:04:57 crc kubenswrapper[4769]: I0131 17:04:57.322158 4769 scope.go:117] "RemoveContainer" containerID="0fa55dcb76bc686943e5b7135025fa4b96b4f5ad436cce5f2cb11ea5e727ea37" Jan 31 17:04:57 crc kubenswrapper[4769]: I0131 17:04:57.322202 4769 scope.go:117] "RemoveContainer" containerID="3646ad1992c210e8bf551ccff3f4a82477556311e79cf13f4103fcc1edb9f1a4" Jan 31 17:04:57 crc kubenswrapper[4769]: E0131 17:04:57.322664 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:04:58 crc kubenswrapper[4769]: I0131 17:04:58.335590 4769 scope.go:117] "RemoveContainer" containerID="0fa55dcb76bc686943e5b7135025fa4b96b4f5ad436cce5f2cb11ea5e727ea37" Jan 31 17:04:58 crc kubenswrapper[4769]: I0131 17:04:58.335632 4769 scope.go:117] "RemoveContainer" containerID="3646ad1992c210e8bf551ccff3f4a82477556311e79cf13f4103fcc1edb9f1a4" Jan 31 17:04:58 crc kubenswrapper[4769]: E0131 17:04:58.336016 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:05:08 crc kubenswrapper[4769]: I0131 17:05:08.709172 4769 scope.go:117] "RemoveContainer" 
containerID="7737b4ea15d66ecd3a1f0162917edc283af061057e4e073e1b0c4d285ababc8a" Jan 31 17:05:08 crc kubenswrapper[4769]: I0131 17:05:08.709902 4769 scope.go:117] "RemoveContainer" containerID="62946bc30fa0ff5c4e547df146dd6e5243ff6ef16c16250218e2568d4be097ae" Jan 31 17:05:08 crc kubenswrapper[4769]: I0131 17:05:08.710101 4769 scope.go:117] "RemoveContainer" containerID="3763e21c60b42d0d544a22b54f711243a20cd6d855d0b802a15c29ee20e6ebb4" Jan 31 17:05:08 crc kubenswrapper[4769]: I0131 17:05:08.710153 4769 scope.go:117] "RemoveContainer" containerID="38c57a6952923d6f8bb33f11c84f339256f80eb4733efa1a6c4093e5a7b785ad" Jan 31 17:05:08 crc kubenswrapper[4769]: E0131 17:05:08.710716 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:05:09 crc kubenswrapper[4769]: I0131 17:05:09.709227 4769 scope.go:117] "RemoveContainer" containerID="0fa55dcb76bc686943e5b7135025fa4b96b4f5ad436cce5f2cb11ea5e727ea37" Jan 31 17:05:09 crc kubenswrapper[4769]: I0131 17:05:09.709281 4769 scope.go:117] "RemoveContainer" containerID="3646ad1992c210e8bf551ccff3f4a82477556311e79cf13f4103fcc1edb9f1a4" Jan 31 17:05:09 crc kubenswrapper[4769]: E0131 17:05:09.709774 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:05:20 crc kubenswrapper[4769]: I0131 17:05:20.352996 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices\") pod \"swift-ring-rebalance-2sjs2\" (UID: \"54c0116b-a027-4f11-8b6b-aa00778f1acb\") " pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 17:05:20 crc kubenswrapper[4769]: E0131 17:05:20.353383 4769 configmap.go:193] Couldn't get configMap swift-kuttl-tests/swift-ring-config-data: configmap "swift-ring-config-data" not found Jan 31 17:05:20 crc kubenswrapper[4769]: E0131 17:05:20.353688 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices 
podName:54c0116b-a027-4f11-8b6b-aa00778f1acb nodeName:}" failed. No retries permitted until 2026-01-31 17:07:22.353666284 +0000 UTC m=+2290.427834963 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices") pod "swift-ring-rebalance-2sjs2" (UID: "54c0116b-a027-4f11-8b6b-aa00778f1acb") : configmap "swift-ring-config-data" not found Jan 31 17:05:21 crc kubenswrapper[4769]: I0131 17:05:21.708193 4769 scope.go:117] "RemoveContainer" containerID="0fa55dcb76bc686943e5b7135025fa4b96b4f5ad436cce5f2cb11ea5e727ea37" Jan 31 17:05:21 crc kubenswrapper[4769]: I0131 17:05:21.708641 4769 scope.go:117] "RemoveContainer" containerID="3646ad1992c210e8bf551ccff3f4a82477556311e79cf13f4103fcc1edb9f1a4" Jan 31 17:05:21 crc kubenswrapper[4769]: E0131 17:05:21.708999 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:05:22 crc kubenswrapper[4769]: I0131 17:05:22.711779 4769 scope.go:117] "RemoveContainer" containerID="7737b4ea15d66ecd3a1f0162917edc283af061057e4e073e1b0c4d285ababc8a" Jan 31 17:05:22 crc kubenswrapper[4769]: I0131 17:05:22.711861 4769 scope.go:117] "RemoveContainer" containerID="62946bc30fa0ff5c4e547df146dd6e5243ff6ef16c16250218e2568d4be097ae" Jan 31 17:05:22 crc kubenswrapper[4769]: I0131 17:05:22.711944 4769 scope.go:117] "RemoveContainer" containerID="3763e21c60b42d0d544a22b54f711243a20cd6d855d0b802a15c29ee20e6ebb4" Jan 31 17:05:22 crc kubenswrapper[4769]: I0131 17:05:22.711975 4769 scope.go:117] "RemoveContainer" containerID="38c57a6952923d6f8bb33f11c84f339256f80eb4733efa1a6c4093e5a7b785ad" Jan 31 17:05:22 crc kubenswrapper[4769]: E0131 17:05:22.712220 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:05:25 crc kubenswrapper[4769]: E0131 17:05:25.421184 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ring-data-devices], unattached volumes=[], failed to process volumes=[]: context deadline exceeded" 
pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" podUID="54c0116b-a027-4f11-8b6b-aa00778f1acb" Jan 31 17:05:25 crc kubenswrapper[4769]: I0131 17:05:25.568795 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 17:05:32 crc kubenswrapper[4769]: I0131 17:05:32.718894 4769 scope.go:117] "RemoveContainer" containerID="0fa55dcb76bc686943e5b7135025fa4b96b4f5ad436cce5f2cb11ea5e727ea37" Jan 31 17:05:32 crc kubenswrapper[4769]: I0131 17:05:32.719985 4769 scope.go:117] "RemoveContainer" containerID="3646ad1992c210e8bf551ccff3f4a82477556311e79cf13f4103fcc1edb9f1a4" Jan 31 17:05:32 crc kubenswrapper[4769]: E0131 17:05:32.720462 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:05:33 crc kubenswrapper[4769]: I0131 17:05:33.708103 4769 scope.go:117] "RemoveContainer" containerID="7737b4ea15d66ecd3a1f0162917edc283af061057e4e073e1b0c4d285ababc8a" Jan 31 17:05:33 crc kubenswrapper[4769]: I0131 17:05:33.708174 4769 scope.go:117] "RemoveContainer" containerID="62946bc30fa0ff5c4e547df146dd6e5243ff6ef16c16250218e2568d4be097ae" Jan 31 17:05:33 crc kubenswrapper[4769]: I0131 17:05:33.708255 4769 scope.go:117] "RemoveContainer" containerID="3763e21c60b42d0d544a22b54f711243a20cd6d855d0b802a15c29ee20e6ebb4" Jan 31 17:05:33 crc kubenswrapper[4769]: I0131 17:05:33.708287 4769 scope.go:117] "RemoveContainer" containerID="38c57a6952923d6f8bb33f11c84f339256f80eb4733efa1a6c4093e5a7b785ad" Jan 31 17:05:33 crc kubenswrapper[4769]: E0131 17:05:33.708541 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:05:45 crc kubenswrapper[4769]: I0131 17:05:45.708449 4769 scope.go:117] "RemoveContainer" containerID="0fa55dcb76bc686943e5b7135025fa4b96b4f5ad436cce5f2cb11ea5e727ea37" Jan 31 17:05:45 crc kubenswrapper[4769]: I0131 17:05:45.709158 4769 scope.go:117] "RemoveContainer" containerID="3646ad1992c210e8bf551ccff3f4a82477556311e79cf13f4103fcc1edb9f1a4" Jan 31 17:05:45 crc kubenswrapper[4769]: E0131 17:05:45.709753 4769 
pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:05:48 crc kubenswrapper[4769]: I0131 17:05:48.708735 4769 scope.go:117] "RemoveContainer" containerID="7737b4ea15d66ecd3a1f0162917edc283af061057e4e073e1b0c4d285ababc8a" Jan 31 17:05:48 crc kubenswrapper[4769]: I0131 17:05:48.708974 4769 scope.go:117] "RemoveContainer" containerID="62946bc30fa0ff5c4e547df146dd6e5243ff6ef16c16250218e2568d4be097ae" Jan 31 17:05:48 crc kubenswrapper[4769]: I0131 17:05:48.709056 4769 scope.go:117] "RemoveContainer" containerID="3763e21c60b42d0d544a22b54f711243a20cd6d855d0b802a15c29ee20e6ebb4" Jan 31 17:05:48 crc kubenswrapper[4769]: I0131 17:05:48.709102 4769 scope.go:117] "RemoveContainer" containerID="38c57a6952923d6f8bb33f11c84f339256f80eb4733efa1a6c4093e5a7b785ad" Jan 31 17:05:48 crc kubenswrapper[4769]: E0131 17:05:48.709373 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:06:00 crc kubenswrapper[4769]: I0131 17:06:00.708920 4769 scope.go:117] "RemoveContainer" containerID="0fa55dcb76bc686943e5b7135025fa4b96b4f5ad436cce5f2cb11ea5e727ea37" Jan 31 17:06:00 crc kubenswrapper[4769]: I0131 17:06:00.709735 4769 scope.go:117] "RemoveContainer" containerID="3646ad1992c210e8bf551ccff3f4a82477556311e79cf13f4103fcc1edb9f1a4" Jan 31 17:06:00 crc kubenswrapper[4769]: E0131 17:06:00.710162 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:06:02 crc kubenswrapper[4769]: I0131 17:06:02.713757 4769 scope.go:117] "RemoveContainer" 
containerID="7737b4ea15d66ecd3a1f0162917edc283af061057e4e073e1b0c4d285ababc8a" Jan 31 17:06:02 crc kubenswrapper[4769]: I0131 17:06:02.714240 4769 scope.go:117] "RemoveContainer" containerID="62946bc30fa0ff5c4e547df146dd6e5243ff6ef16c16250218e2568d4be097ae" Jan 31 17:06:02 crc kubenswrapper[4769]: I0131 17:06:02.714431 4769 scope.go:117] "RemoveContainer" containerID="3763e21c60b42d0d544a22b54f711243a20cd6d855d0b802a15c29ee20e6ebb4" Jan 31 17:06:02 crc kubenswrapper[4769]: I0131 17:06:02.714575 4769 scope.go:117] "RemoveContainer" containerID="38c57a6952923d6f8bb33f11c84f339256f80eb4733efa1a6c4093e5a7b785ad" Jan 31 17:06:02 crc kubenswrapper[4769]: E0131 17:06:02.715204 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:06:12 crc kubenswrapper[4769]: I0131 17:06:12.712308 4769 scope.go:117] "RemoveContainer" containerID="0fa55dcb76bc686943e5b7135025fa4b96b4f5ad436cce5f2cb11ea5e727ea37" Jan 31 17:06:12 crc kubenswrapper[4769]: I0131 17:06:12.712956 4769 scope.go:117] "RemoveContainer" containerID="3646ad1992c210e8bf551ccff3f4a82477556311e79cf13f4103fcc1edb9f1a4" Jan 31 17:06:12 crc kubenswrapper[4769]: E0131 17:06:12.713181 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:06:15 crc kubenswrapper[4769]: I0131 17:06:15.709618 4769 scope.go:117] "RemoveContainer" containerID="7737b4ea15d66ecd3a1f0162917edc283af061057e4e073e1b0c4d285ababc8a" Jan 31 17:06:15 crc kubenswrapper[4769]: I0131 17:06:15.710075 4769 scope.go:117] "RemoveContainer" containerID="62946bc30fa0ff5c4e547df146dd6e5243ff6ef16c16250218e2568d4be097ae" Jan 31 17:06:15 crc kubenswrapper[4769]: I0131 17:06:15.710258 4769 scope.go:117] "RemoveContainer" containerID="3763e21c60b42d0d544a22b54f711243a20cd6d855d0b802a15c29ee20e6ebb4" Jan 31 17:06:15 crc kubenswrapper[4769]: I0131 17:06:15.710358 4769 scope.go:117] "RemoveContainer" containerID="38c57a6952923d6f8bb33f11c84f339256f80eb4733efa1a6c4093e5a7b785ad" Jan 31 17:06:15 crc kubenswrapper[4769]: E0131 17:06:15.710861 4769 pod_workers.go:1301] "Error syncing 
pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:06:20 crc kubenswrapper[4769]: I0131 17:06:20.681928 4769 patch_prober.go:28] interesting pod/machine-config-daemon-4bqbm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 31 17:06:20 crc kubenswrapper[4769]: I0131 17:06:20.682186 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 31 17:06:27 crc kubenswrapper[4769]: I0131 17:06:27.709129 4769 scope.go:117] "RemoveContainer" containerID="0fa55dcb76bc686943e5b7135025fa4b96b4f5ad436cce5f2cb11ea5e727ea37" Jan 31 17:06:27 crc kubenswrapper[4769]: I0131 17:06:27.709575 4769 scope.go:117] "RemoveContainer" containerID="3646ad1992c210e8bf551ccff3f4a82477556311e79cf13f4103fcc1edb9f1a4" Jan 31 17:06:27 crc kubenswrapper[4769]: E0131 17:06:27.710064 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:06:30 crc kubenswrapper[4769]: I0131 17:06:30.709941 4769 scope.go:117] "RemoveContainer" containerID="7737b4ea15d66ecd3a1f0162917edc283af061057e4e073e1b0c4d285ababc8a" Jan 31 17:06:30 crc kubenswrapper[4769]: I0131 17:06:30.710747 4769 scope.go:117] "RemoveContainer" containerID="62946bc30fa0ff5c4e547df146dd6e5243ff6ef16c16250218e2568d4be097ae" Jan 31 17:06:30 crc kubenswrapper[4769]: I0131 17:06:30.710971 4769 scope.go:117] "RemoveContainer" containerID="3763e21c60b42d0d544a22b54f711243a20cd6d855d0b802a15c29ee20e6ebb4" Jan 31 17:06:30 crc kubenswrapper[4769]: I0131 17:06:30.711053 4769 scope.go:117] "RemoveContainer" containerID="38c57a6952923d6f8bb33f11c84f339256f80eb4733efa1a6c4093e5a7b785ad" Jan 31 17:06:30 crc kubenswrapper[4769]: E0131 17:06:30.711786 4769 
pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:06:42 crc kubenswrapper[4769]: I0131 17:06:42.716838 4769 scope.go:117] "RemoveContainer" containerID="0fa55dcb76bc686943e5b7135025fa4b96b4f5ad436cce5f2cb11ea5e727ea37" Jan 31 17:06:42 crc kubenswrapper[4769]: I0131 17:06:42.717342 4769 scope.go:117] "RemoveContainer" containerID="3646ad1992c210e8bf551ccff3f4a82477556311e79cf13f4103fcc1edb9f1a4" Jan 31 17:06:42 crc kubenswrapper[4769]: E0131 17:06:42.717588 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:06:44 crc kubenswrapper[4769]: I0131 17:06:44.711473 4769 scope.go:117] "RemoveContainer" containerID="7737b4ea15d66ecd3a1f0162917edc283af061057e4e073e1b0c4d285ababc8a" Jan 31 17:06:44 crc kubenswrapper[4769]: I0131 17:06:44.711992 4769 scope.go:117] "RemoveContainer" containerID="62946bc30fa0ff5c4e547df146dd6e5243ff6ef16c16250218e2568d4be097ae" Jan 31 17:06:44 crc kubenswrapper[4769]: I0131 17:06:44.712191 4769 scope.go:117] "RemoveContainer" containerID="3763e21c60b42d0d544a22b54f711243a20cd6d855d0b802a15c29ee20e6ebb4" Jan 31 17:06:44 crc kubenswrapper[4769]: I0131 17:06:44.712261 4769 scope.go:117] "RemoveContainer" containerID="38c57a6952923d6f8bb33f11c84f339256f80eb4733efa1a6c4093e5a7b785ad" Jan 31 17:06:44 crc kubenswrapper[4769]: E0131 17:06:44.712780 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed 
to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:06:50 crc kubenswrapper[4769]: I0131 17:06:50.682800 4769 patch_prober.go:28] interesting pod/machine-config-daemon-4bqbm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 31 17:06:50 crc kubenswrapper[4769]: I0131 17:06:50.683658 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 31 17:06:52 crc kubenswrapper[4769]: I0131 17:06:52.379962 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="e8cf1205aa6ab18a9ce5183ca3000df44bf01a5ba2f0fab8a4ae50c0f9e84c86" exitCode=1 Jan 31 17:06:52 crc kubenswrapper[4769]: I0131 17:06:52.380009 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"e8cf1205aa6ab18a9ce5183ca3000df44bf01a5ba2f0fab8a4ae50c0f9e84c86"} Jan 31 17:06:52 crc kubenswrapper[4769]: I0131 17:06:52.380048 4769 scope.go:117] "RemoveContainer" containerID="855e4b559e0049ffffed208d1b0d85aa658628233a0b4672bb3a638bb95f6768" Jan 31 17:06:52 crc kubenswrapper[4769]: I0131 17:06:52.380839 4769 scope.go:117] "RemoveContainer" containerID="7737b4ea15d66ecd3a1f0162917edc283af061057e4e073e1b0c4d285ababc8a" Jan 31 17:06:52 crc kubenswrapper[4769]: I0131 17:06:52.380910 4769 scope.go:117] "RemoveContainer" containerID="62946bc30fa0ff5c4e547df146dd6e5243ff6ef16c16250218e2568d4be097ae" Jan 31 17:06:52 crc kubenswrapper[4769]: I0131 17:06:52.380947 4769 scope.go:117] "RemoveContainer" containerID="e8cf1205aa6ab18a9ce5183ca3000df44bf01a5ba2f0fab8a4ae50c0f9e84c86" Jan 31 17:06:52 crc kubenswrapper[4769]: I0131 17:06:52.381041 4769 scope.go:117] "RemoveContainer" containerID="3763e21c60b42d0d544a22b54f711243a20cd6d855d0b802a15c29ee20e6ebb4" Jan 31 17:06:52 crc kubenswrapper[4769]: I0131 17:06:52.381088 4769 scope.go:117] "RemoveContainer" containerID="38c57a6952923d6f8bb33f11c84f339256f80eb4733efa1a6c4093e5a7b785ad" Jan 31 17:06:52 crc kubenswrapper[4769]: E0131 17:06:52.381476 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 20s restarting failed container=container-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: 
\"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:06:55 crc kubenswrapper[4769]: I0131 17:06:55.708156 4769 scope.go:117] "RemoveContainer" containerID="0fa55dcb76bc686943e5b7135025fa4b96b4f5ad436cce5f2cb11ea5e727ea37" Jan 31 17:06:55 crc kubenswrapper[4769]: I0131 17:06:55.708721 4769 scope.go:117] "RemoveContainer" containerID="3646ad1992c210e8bf551ccff3f4a82477556311e79cf13f4103fcc1edb9f1a4" Jan 31 17:06:55 crc kubenswrapper[4769]: E0131 17:06:55.708929 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:06:59 crc kubenswrapper[4769]: I0131 17:06:59.055929 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/swift-storage-1"] Jan 31 17:06:59 crc kubenswrapper[4769]: E0131 17:06:59.056354 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb8f5e2d-f2e0-44fe-b885-34144b94e667" containerName="extract-utilities" Jan 31 17:06:59 crc kubenswrapper[4769]: I0131 17:06:59.056376 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb8f5e2d-f2e0-44fe-b885-34144b94e667" containerName="extract-utilities" Jan 31 17:06:59 crc kubenswrapper[4769]: E0131 17:06:59.056405 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb8f5e2d-f2e0-44fe-b885-34144b94e667" containerName="registry-server" Jan 31 17:06:59 crc kubenswrapper[4769]: I0131 17:06:59.056417 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb8f5e2d-f2e0-44fe-b885-34144b94e667" containerName="registry-server" Jan 31 17:06:59 crc kubenswrapper[4769]: E0131 17:06:59.056457 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb8f5e2d-f2e0-44fe-b885-34144b94e667" containerName="extract-content" Jan 31 17:06:59 crc kubenswrapper[4769]: I0131 17:06:59.056472 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb8f5e2d-f2e0-44fe-b885-34144b94e667" containerName="extract-content" Jan 31 17:06:59 crc kubenswrapper[4769]: I0131 17:06:59.056768 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb8f5e2d-f2e0-44fe-b885-34144b94e667" containerName="registry-server" Jan 31 17:06:59 crc kubenswrapper[4769]: I0131 17:06:59.065956 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-storage-1" Jan 31 17:06:59 crc kubenswrapper[4769]: I0131 17:06:59.072479 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["swift-kuttl-tests/swift-storage-2"] Jan 31 17:06:59 crc kubenswrapper[4769]: I0131 17:06:59.081478 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-storage-2" Jan 31 17:06:59 crc kubenswrapper[4769]: I0131 17:06:59.083557 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-storage-1"] Jan 31 17:06:59 crc kubenswrapper[4769]: I0131 17:06:59.114909 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-storage-2"] Jan 31 17:06:59 crc kubenswrapper[4769]: I0131 17:06:59.200839 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/a2e45b15-42ba-44b4-91c5-fa5bc64d7a43-lock\") pod \"swift-storage-1\" (UID: \"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43\") " pod="swift-kuttl-tests/swift-storage-1" Jan 31 17:06:59 crc kubenswrapper[4769]: I0131 17:06:59.200904 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/a2e45b15-42ba-44b4-91c5-fa5bc64d7a43-etc-swift\") pod \"swift-storage-1\" (UID: \"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43\") " pod="swift-kuttl-tests/swift-storage-1" Jan 31 17:06:59 crc kubenswrapper[4769]: I0131 17:06:59.200932 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/fd794dbe-f3dd-4a87-8b3f-612f46a05b2b-cache\") pod \"swift-storage-2\" (UID: \"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b\") " pod="swift-kuttl-tests/swift-storage-2" Jan 31 17:06:59 crc kubenswrapper[4769]: I0131 17:06:59.200970 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9q2zl\" (UniqueName: \"kubernetes.io/projected/a2e45b15-42ba-44b4-91c5-fa5bc64d7a43-kube-api-access-9q2zl\") pod \"swift-storage-1\" (UID: \"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43\") " pod="swift-kuttl-tests/swift-storage-1" Jan 31 17:06:59 crc kubenswrapper[4769]: I0131 17:06:59.201009 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/a2e45b15-42ba-44b4-91c5-fa5bc64d7a43-cache\") pod \"swift-storage-1\" (UID: \"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43\") " pod="swift-kuttl-tests/swift-storage-1" Jan 31 17:06:59 crc kubenswrapper[4769]: I0131 17:06:59.201043 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/fd794dbe-f3dd-4a87-8b3f-612f46a05b2b-lock\") pod \"swift-storage-2\" (UID: \"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b\") " pod="swift-kuttl-tests/swift-storage-2" Jan 31 17:06:59 crc kubenswrapper[4769]: I0131 17:06:59.201082 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/fd794dbe-f3dd-4a87-8b3f-612f46a05b2b-etc-swift\") pod \"swift-storage-2\" (UID: \"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b\") " pod="swift-kuttl-tests/swift-storage-2" Jan 31 17:06:59 crc kubenswrapper[4769]: I0131 17:06:59.201114 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"swift-storage-2\" (UID: \"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b\") " pod="swift-kuttl-tests/swift-storage-2" Jan 31 17:06:59 crc kubenswrapper[4769]: I0131 17:06:59.201139 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"kube-api-access-vrss7\" (UniqueName: \"kubernetes.io/projected/fd794dbe-f3dd-4a87-8b3f-612f46a05b2b-kube-api-access-vrss7\") pod \"swift-storage-2\" (UID: \"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b\") " pod="swift-kuttl-tests/swift-storage-2" Jan 31 17:06:59 crc kubenswrapper[4769]: I0131 17:06:59.201160 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"swift-storage-1\" (UID: \"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43\") " pod="swift-kuttl-tests/swift-storage-1" Jan 31 17:06:59 crc kubenswrapper[4769]: I0131 17:06:59.301882 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9q2zl\" (UniqueName: \"kubernetes.io/projected/a2e45b15-42ba-44b4-91c5-fa5bc64d7a43-kube-api-access-9q2zl\") pod \"swift-storage-1\" (UID: \"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43\") " pod="swift-kuttl-tests/swift-storage-1" Jan 31 17:06:59 crc kubenswrapper[4769]: I0131 17:06:59.301936 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/a2e45b15-42ba-44b4-91c5-fa5bc64d7a43-cache\") pod \"swift-storage-1\" (UID: \"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43\") " pod="swift-kuttl-tests/swift-storage-1" Jan 31 17:06:59 crc kubenswrapper[4769]: I0131 17:06:59.301964 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/fd794dbe-f3dd-4a87-8b3f-612f46a05b2b-lock\") pod \"swift-storage-2\" (UID: \"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b\") " pod="swift-kuttl-tests/swift-storage-2" Jan 31 17:06:59 crc kubenswrapper[4769]: I0131 17:06:59.301997 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/fd794dbe-f3dd-4a87-8b3f-612f46a05b2b-etc-swift\") pod \"swift-storage-2\" (UID: \"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b\") " pod="swift-kuttl-tests/swift-storage-2" Jan 31 17:06:59 crc kubenswrapper[4769]: I0131 17:06:59.302028 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"swift-storage-2\" (UID: \"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b\") " pod="swift-kuttl-tests/swift-storage-2" Jan 31 17:06:59 crc kubenswrapper[4769]: I0131 17:06:59.302049 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vrss7\" (UniqueName: \"kubernetes.io/projected/fd794dbe-f3dd-4a87-8b3f-612f46a05b2b-kube-api-access-vrss7\") pod \"swift-storage-2\" (UID: \"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b\") " pod="swift-kuttl-tests/swift-storage-2" Jan 31 17:06:59 crc kubenswrapper[4769]: I0131 17:06:59.302071 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"swift-storage-1\" (UID: \"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43\") " pod="swift-kuttl-tests/swift-storage-1" Jan 31 17:06:59 crc kubenswrapper[4769]: I0131 17:06:59.302147 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/a2e45b15-42ba-44b4-91c5-fa5bc64d7a43-lock\") pod \"swift-storage-1\" (UID: \"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43\") " pod="swift-kuttl-tests/swift-storage-1" Jan 31 17:06:59 crc kubenswrapper[4769]: I0131 
17:06:59.302180 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/a2e45b15-42ba-44b4-91c5-fa5bc64d7a43-etc-swift\") pod \"swift-storage-1\" (UID: \"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43\") " pod="swift-kuttl-tests/swift-storage-1" Jan 31 17:06:59 crc kubenswrapper[4769]: I0131 17:06:59.302204 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/fd794dbe-f3dd-4a87-8b3f-612f46a05b2b-cache\") pod \"swift-storage-2\" (UID: \"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b\") " pod="swift-kuttl-tests/swift-storage-2" Jan 31 17:06:59 crc kubenswrapper[4769]: I0131 17:06:59.302428 4769 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"swift-storage-2\" (UID: \"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b\") device mount path \"/mnt/openstack/pv01\"" pod="swift-kuttl-tests/swift-storage-2" Jan 31 17:06:59 crc kubenswrapper[4769]: I0131 17:06:59.302560 4769 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"swift-storage-1\" (UID: \"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43\") device mount path \"/mnt/openstack/pv12\"" pod="swift-kuttl-tests/swift-storage-1" Jan 31 17:06:59 crc kubenswrapper[4769]: I0131 17:06:59.302642 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/fd794dbe-f3dd-4a87-8b3f-612f46a05b2b-lock\") pod \"swift-storage-2\" (UID: \"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b\") " pod="swift-kuttl-tests/swift-storage-2" Jan 31 17:06:59 crc kubenswrapper[4769]: I0131 17:06:59.302650 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/fd794dbe-f3dd-4a87-8b3f-612f46a05b2b-cache\") pod \"swift-storage-2\" (UID: \"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b\") " pod="swift-kuttl-tests/swift-storage-2" Jan 31 17:06:59 crc kubenswrapper[4769]: I0131 17:06:59.302655 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/a2e45b15-42ba-44b4-91c5-fa5bc64d7a43-lock\") pod \"swift-storage-1\" (UID: \"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43\") " pod="swift-kuttl-tests/swift-storage-1" Jan 31 17:06:59 crc kubenswrapper[4769]: I0131 17:06:59.302763 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/a2e45b15-42ba-44b4-91c5-fa5bc64d7a43-cache\") pod \"swift-storage-1\" (UID: \"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43\") " pod="swift-kuttl-tests/swift-storage-1" Jan 31 17:06:59 crc kubenswrapper[4769]: I0131 17:06:59.309927 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/fd794dbe-f3dd-4a87-8b3f-612f46a05b2b-etc-swift\") pod \"swift-storage-2\" (UID: \"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b\") " pod="swift-kuttl-tests/swift-storage-2" Jan 31 17:06:59 crc kubenswrapper[4769]: I0131 17:06:59.310477 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/a2e45b15-42ba-44b4-91c5-fa5bc64d7a43-etc-swift\") pod \"swift-storage-1\" (UID: \"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43\") " pod="swift-kuttl-tests/swift-storage-1" Jan 31 17:06:59 crc 
kubenswrapper[4769]: I0131 17:06:59.318618 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vrss7\" (UniqueName: \"kubernetes.io/projected/fd794dbe-f3dd-4a87-8b3f-612f46a05b2b-kube-api-access-vrss7\") pod \"swift-storage-2\" (UID: \"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b\") " pod="swift-kuttl-tests/swift-storage-2" Jan 31 17:06:59 crc kubenswrapper[4769]: I0131 17:06:59.325845 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9q2zl\" (UniqueName: \"kubernetes.io/projected/a2e45b15-42ba-44b4-91c5-fa5bc64d7a43-kube-api-access-9q2zl\") pod \"swift-storage-1\" (UID: \"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43\") " pod="swift-kuttl-tests/swift-storage-1" Jan 31 17:06:59 crc kubenswrapper[4769]: I0131 17:06:59.327065 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"swift-storage-2\" (UID: \"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b\") " pod="swift-kuttl-tests/swift-storage-2" Jan 31 17:06:59 crc kubenswrapper[4769]: I0131 17:06:59.329545 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"swift-storage-1\" (UID: \"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43\") " pod="swift-kuttl-tests/swift-storage-1" Jan 31 17:06:59 crc kubenswrapper[4769]: I0131 17:06:59.397817 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-storage-1" Jan 31 17:06:59 crc kubenswrapper[4769]: I0131 17:06:59.414988 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-storage-2" Jan 31 17:06:59 crc kubenswrapper[4769]: I0131 17:06:59.713269 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-storage-2"] Jan 31 17:06:59 crc kubenswrapper[4769]: I0131 17:06:59.856594 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["swift-kuttl-tests/swift-storage-1"] Jan 31 17:07:00 crc kubenswrapper[4769]: I0131 17:07:00.466240 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerStarted","Data":"1b6a18f12df506c5dc77f9b2cf1bf9686db020ad8ae6789ec3656774b42781b6"} Jan 31 17:07:00 crc kubenswrapper[4769]: I0131 17:07:00.466282 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerStarted","Data":"b75ca78e3e732a2fda632566c5f33677ad5d96a1a4ba40cb2b7ce0b76d12a238"} Jan 31 17:07:00 crc kubenswrapper[4769]: I0131 17:07:00.466291 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerStarted","Data":"5ee1eb556fbe4a7a84a2c70888748fa2e164745f2fb43e4758e572ba0a822681"} Jan 31 17:07:00 crc kubenswrapper[4769]: I0131 17:07:00.466299 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerStarted","Data":"ccc3114d34315f285207afc717b1766c32cf52bef5b0d8b9ada6e49075ed7155"} Jan 31 17:07:00 crc kubenswrapper[4769]: I0131 17:07:00.467962 4769 generic.go:334] "Generic (PLEG): container finished" podID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" 
containerID="4eb802191aa38b7113b898d1194ff0e49185f91348f7bf19e65a3c8b93a0af51" exitCode=1 Jan 31 17:07:00 crc kubenswrapper[4769]: I0131 17:07:00.467985 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerStarted","Data":"965e784b5800b2dbce2ae160fdb9324ff0a82d16f57d326098da1c9500c66f37"} Jan 31 17:07:00 crc kubenswrapper[4769]: I0131 17:07:00.467999 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerStarted","Data":"66b4016622500cd6ba265da6d285874996f13e55cb233531b5cbc0b9096dc84d"} Jan 31 17:07:00 crc kubenswrapper[4769]: I0131 17:07:00.468007 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerDied","Data":"4eb802191aa38b7113b898d1194ff0e49185f91348f7bf19e65a3c8b93a0af51"} Jan 31 17:07:00 crc kubenswrapper[4769]: I0131 17:07:00.468017 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerStarted","Data":"3ef332fb353d32206e7071f04f983429b2ca639f4260731d3a8a308dd0b8a9b7"} Jan 31 17:07:00 crc kubenswrapper[4769]: I0131 17:07:00.468025 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerStarted","Data":"894d68380a2549b4e540d1ae2d63b761bbecbc739b19402c6fc009f0e7adde2f"} Jan 31 17:07:01 crc kubenswrapper[4769]: I0131 17:07:01.484919 4769 generic.go:334] "Generic (PLEG): container finished" podID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" containerID="b75ca78e3e732a2fda632566c5f33677ad5d96a1a4ba40cb2b7ce0b76d12a238" exitCode=1 Jan 31 17:07:01 crc kubenswrapper[4769]: I0131 17:07:01.485235 4769 generic.go:334] "Generic (PLEG): container finished" podID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" containerID="9feb05d2be863420819b2b5e5eddfab94c6ab229b91ab60523f4faf3af9f38db" exitCode=1 Jan 31 17:07:01 crc kubenswrapper[4769]: I0131 17:07:01.484955 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerDied","Data":"b75ca78e3e732a2fda632566c5f33677ad5d96a1a4ba40cb2b7ce0b76d12a238"} Jan 31 17:07:01 crc kubenswrapper[4769]: I0131 17:07:01.485293 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerStarted","Data":"9548c7d87d21293e08861839a4c106b148191a1dbd979aeb4968fd136b25f122"} Jan 31 17:07:01 crc kubenswrapper[4769]: I0131 17:07:01.485318 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerStarted","Data":"98abc0c933bafe3a911d9b6d714e2f6641cd93b6444f4987481683372a261347"} Jan 31 17:07:01 crc kubenswrapper[4769]: I0131 17:07:01.485327 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerStarted","Data":"49ba9b1bc3b013ee758a2682e8108761cf2360872cfe05b4507999708ef72711"} Jan 31 17:07:01 crc kubenswrapper[4769]: I0131 17:07:01.485336 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" 
event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerStarted","Data":"51eb0b2a449bc022511f1fe81e852884a84222a92cd1c7f96f3712e72f976c7c"} Jan 31 17:07:01 crc kubenswrapper[4769]: I0131 17:07:01.485346 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerDied","Data":"9feb05d2be863420819b2b5e5eddfab94c6ab229b91ab60523f4faf3af9f38db"} Jan 31 17:07:01 crc kubenswrapper[4769]: I0131 17:07:01.485355 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerStarted","Data":"8128f65618d7458e59a2d5dd238d5001c33a0a9a5dffac1cde6b35b4dbd8dec2"} Jan 31 17:07:01 crc kubenswrapper[4769]: I0131 17:07:01.485363 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerStarted","Data":"a2f50dab5985114ac70c1452bd7043a59b69803f2238b4a86b630448d4d298ea"} Jan 31 17:07:01 crc kubenswrapper[4769]: I0131 17:07:01.490628 4769 generic.go:334] "Generic (PLEG): container finished" podID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" containerID="5e693af5c86d384e5193d60606d710b817b92cd094f36500c2261e484c30e202" exitCode=1 Jan 31 17:07:01 crc kubenswrapper[4769]: I0131 17:07:01.490673 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerStarted","Data":"9217c3e817fa2b24c61963e7e83ebb7791e3f246610f5699c19ae5b1fc1f2cbb"} Jan 31 17:07:01 crc kubenswrapper[4769]: I0131 17:07:01.490697 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerStarted","Data":"2bfd9f640c3eb292e0c3eeaf14b7e6161ca49cfce7c7f4969679928912f7fece"} Jan 31 17:07:01 crc kubenswrapper[4769]: I0131 17:07:01.490707 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerStarted","Data":"b699f37792598b745e9d587539e25b4bc55d85f73109f6e5f8f874bcc3d939db"} Jan 31 17:07:01 crc kubenswrapper[4769]: I0131 17:07:01.490715 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerStarted","Data":"cf3815edf78281d41b22fdba9f12ad0237e640690e8100d80b38c9175962d790"} Jan 31 17:07:01 crc kubenswrapper[4769]: I0131 17:07:01.490724 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerStarted","Data":"2a74f58aca822bfa1ec35a054eed66755b9d312b4127d6182497fb1db14021bc"} Jan 31 17:07:01 crc kubenswrapper[4769]: I0131 17:07:01.490732 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerDied","Data":"5e693af5c86d384e5193d60606d710b817b92cd094f36500c2261e484c30e202"} Jan 31 17:07:01 crc kubenswrapper[4769]: I0131 17:07:01.490770 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerStarted","Data":"b534191758672357ea7adbdaa19c64465b94b6a17e9cce93cf3a37a56f2cb429"} Jan 31 17:07:02 crc kubenswrapper[4769]: I0131 
17:07:02.512198 4769 generic.go:334] "Generic (PLEG): container finished" podID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" containerID="7747092ea0943f7e32dbb2eceb6d21ab56eaec59948bf4c550ca7d6983e98ab4" exitCode=1 Jan 31 17:07:02 crc kubenswrapper[4769]: I0131 17:07:02.512263 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerStarted","Data":"f3c8b4b13c8c6f6e2631dc6ba06c4259181d30b19d561800b704b21f49184321"} Jan 31 17:07:02 crc kubenswrapper[4769]: I0131 17:07:02.512291 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerStarted","Data":"194a68a10131ff8b87aac99ca61af92639575f04d7eea7e7ec02de9367cfb4fc"} Jan 31 17:07:02 crc kubenswrapper[4769]: I0131 17:07:02.512300 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerDied","Data":"7747092ea0943f7e32dbb2eceb6d21ab56eaec59948bf4c550ca7d6983e98ab4"} Jan 31 17:07:02 crc kubenswrapper[4769]: I0131 17:07:02.512312 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerStarted","Data":"ee30a33f60a82d2e1dae52bdc8a34b5c68ccb4fb368981ae183e0dea0860570b"} Jan 31 17:07:02 crc kubenswrapper[4769]: I0131 17:07:02.512877 4769 scope.go:117] "RemoveContainer" containerID="4eb802191aa38b7113b898d1194ff0e49185f91348f7bf19e65a3c8b93a0af51" Jan 31 17:07:02 crc kubenswrapper[4769]: I0131 17:07:02.513014 4769 scope.go:117] "RemoveContainer" containerID="5e693af5c86d384e5193d60606d710b817b92cd094f36500c2261e484c30e202" Jan 31 17:07:02 crc kubenswrapper[4769]: I0131 17:07:02.513112 4769 scope.go:117] "RemoveContainer" containerID="7747092ea0943f7e32dbb2eceb6d21ab56eaec59948bf4c550ca7d6983e98ab4" Jan 31 17:07:02 crc kubenswrapper[4769]: I0131 17:07:02.530979 4769 generic.go:334] "Generic (PLEG): container finished" podID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" containerID="c9981a7453c86900fac5bdb52476bf12ed1bf8dbe687089661db9e97e0a60702" exitCode=1 Jan 31 17:07:02 crc kubenswrapper[4769]: I0131 17:07:02.531019 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerStarted","Data":"2c94d2323fdb76bf6100dd24000854731f8ffb2bdd544561a4ccd5ca49b2d8df"} Jan 31 17:07:02 crc kubenswrapper[4769]: I0131 17:07:02.531043 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerStarted","Data":"9ed9681f929f5edea6562235442c736b49c278fe7d04f3e7f2a64acced886f59"} Jan 31 17:07:02 crc kubenswrapper[4769]: I0131 17:07:02.531052 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerDied","Data":"c9981a7453c86900fac5bdb52476bf12ed1bf8dbe687089661db9e97e0a60702"} Jan 31 17:07:02 crc kubenswrapper[4769]: I0131 17:07:02.531063 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerStarted","Data":"3db3e8f1c9b29ed09a5f982f48dcce6770bd77e6d4476386694741e2eedd6d51"} Jan 31 17:07:02 crc kubenswrapper[4769]: I0131 17:07:02.531071 4769 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerStarted","Data":"adae7b6274b51b41d41d9470bc7cba90b1ea33437b4fa10b12789dbd2173dcfe"} Jan 31 17:07:02 crc kubenswrapper[4769]: I0131 17:07:02.531628 4769 scope.go:117] "RemoveContainer" containerID="b75ca78e3e732a2fda632566c5f33677ad5d96a1a4ba40cb2b7ce0b76d12a238" Jan 31 17:07:02 crc kubenswrapper[4769]: I0131 17:07:02.531688 4769 scope.go:117] "RemoveContainer" containerID="9feb05d2be863420819b2b5e5eddfab94c6ab229b91ab60523f4faf3af9f38db" Jan 31 17:07:02 crc kubenswrapper[4769]: I0131 17:07:02.531777 4769 scope.go:117] "RemoveContainer" containerID="c9981a7453c86900fac5bdb52476bf12ed1bf8dbe687089661db9e97e0a60702" Jan 31 17:07:03 crc kubenswrapper[4769]: I0131 17:07:03.550279 4769 generic.go:334] "Generic (PLEG): container finished" podID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" containerID="9708576e2af8a1e9ec28439a6b3221f6aea24679a4eb485f36255e82496b8e39" exitCode=1 Jan 31 17:07:03 crc kubenswrapper[4769]: I0131 17:07:03.551207 4769 generic.go:334] "Generic (PLEG): container finished" podID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" containerID="0639325eada4d14a9892a9886fff82a11fc68ec1c4bc1eaf0e7566636606ed76" exitCode=1 Jan 31 17:07:03 crc kubenswrapper[4769]: I0131 17:07:03.551349 4769 generic.go:334] "Generic (PLEG): container finished" podID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" containerID="1221c131fec1480f788da57c2c749f36a4163f4810988c86a60d77f1a76a4daa" exitCode=1 Jan 31 17:07:03 crc kubenswrapper[4769]: I0131 17:07:03.550553 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerDied","Data":"9708576e2af8a1e9ec28439a6b3221f6aea24679a4eb485f36255e82496b8e39"} Jan 31 17:07:03 crc kubenswrapper[4769]: I0131 17:07:03.551783 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerDied","Data":"0639325eada4d14a9892a9886fff82a11fc68ec1c4bc1eaf0e7566636606ed76"} Jan 31 17:07:03 crc kubenswrapper[4769]: I0131 17:07:03.551959 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerDied","Data":"1221c131fec1480f788da57c2c749f36a4163f4810988c86a60d77f1a76a4daa"} Jan 31 17:07:03 crc kubenswrapper[4769]: I0131 17:07:03.551684 4769 scope.go:117] "RemoveContainer" containerID="1221c131fec1480f788da57c2c749f36a4163f4810988c86a60d77f1a76a4daa" Jan 31 17:07:03 crc kubenswrapper[4769]: I0131 17:07:03.552370 4769 scope.go:117] "RemoveContainer" containerID="0639325eada4d14a9892a9886fff82a11fc68ec1c4bc1eaf0e7566636606ed76" Jan 31 17:07:03 crc kubenswrapper[4769]: I0131 17:07:03.552740 4769 scope.go:117] "RemoveContainer" containerID="9708576e2af8a1e9ec28439a6b3221f6aea24679a4eb485f36255e82496b8e39" Jan 31 17:07:03 crc kubenswrapper[4769]: I0131 17:07:03.552989 4769 scope.go:117] "RemoveContainer" containerID="7747092ea0943f7e32dbb2eceb6d21ab56eaec59948bf4c550ca7d6983e98ab4" Jan 31 17:07:03 crc kubenswrapper[4769]: E0131 17:07:03.553676 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to 
\"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 10s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:07:03 crc kubenswrapper[4769]: I0131 17:07:03.564659 4769 generic.go:334] "Generic (PLEG): container finished" podID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" containerID="adb6a0f05e01e1b53e6ca33955c1f3c7d460cb477e5a427288db2b14371b43af" exitCode=1 Jan 31 17:07:03 crc kubenswrapper[4769]: I0131 17:07:03.564857 4769 generic.go:334] "Generic (PLEG): container finished" podID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" containerID="47e6ef44f4cafd40ddd8067c4d2a122ba8837ab89b1f0635dba6d2dfbef60b1e" exitCode=1 Jan 31 17:07:03 crc kubenswrapper[4769]: I0131 17:07:03.564859 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerStarted","Data":"09689c5c21340034799ebdace944e8395f1cdef0c983425743265006dec3c64a"} Jan 31 17:07:03 crc kubenswrapper[4769]: I0131 17:07:03.565098 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerDied","Data":"adb6a0f05e01e1b53e6ca33955c1f3c7d460cb477e5a427288db2b14371b43af"} Jan 31 17:07:03 crc kubenswrapper[4769]: I0131 17:07:03.565216 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerDied","Data":"47e6ef44f4cafd40ddd8067c4d2a122ba8837ab89b1f0635dba6d2dfbef60b1e"} Jan 31 17:07:03 crc kubenswrapper[4769]: I0131 17:07:03.565686 4769 scope.go:117] "RemoveContainer" containerID="47e6ef44f4cafd40ddd8067c4d2a122ba8837ab89b1f0635dba6d2dfbef60b1e" Jan 31 17:07:03 crc kubenswrapper[4769]: I0131 17:07:03.565979 4769 scope.go:117] "RemoveContainer" containerID="adb6a0f05e01e1b53e6ca33955c1f3c7d460cb477e5a427288db2b14371b43af" Jan 31 17:07:03 crc kubenswrapper[4769]: E0131 17:07:03.566650 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:07:03 crc kubenswrapper[4769]: I0131 17:07:03.613256 4769 scope.go:117] "RemoveContainer" containerID="5e693af5c86d384e5193d60606d710b817b92cd094f36500c2261e484c30e202" Jan 31 17:07:03 crc kubenswrapper[4769]: I0131 17:07:03.654274 4769 scope.go:117] "RemoveContainer" containerID="4eb802191aa38b7113b898d1194ff0e49185f91348f7bf19e65a3c8b93a0af51" Jan 31 17:07:03 crc kubenswrapper[4769]: I0131 17:07:03.690852 4769 scope.go:117] "RemoveContainer" containerID="9feb05d2be863420819b2b5e5eddfab94c6ab229b91ab60523f4faf3af9f38db" Jan 31 17:07:03 crc kubenswrapper[4769]: I0131 
17:07:03.708715 4769 scope.go:117] "RemoveContainer" containerID="7737b4ea15d66ecd3a1f0162917edc283af061057e4e073e1b0c4d285ababc8a" Jan 31 17:07:03 crc kubenswrapper[4769]: I0131 17:07:03.708828 4769 scope.go:117] "RemoveContainer" containerID="62946bc30fa0ff5c4e547df146dd6e5243ff6ef16c16250218e2568d4be097ae" Jan 31 17:07:03 crc kubenswrapper[4769]: I0131 17:07:03.708882 4769 scope.go:117] "RemoveContainer" containerID="e8cf1205aa6ab18a9ce5183ca3000df44bf01a5ba2f0fab8a4ae50c0f9e84c86" Jan 31 17:07:03 crc kubenswrapper[4769]: I0131 17:07:03.708991 4769 scope.go:117] "RemoveContainer" containerID="3763e21c60b42d0d544a22b54f711243a20cd6d855d0b802a15c29ee20e6ebb4" Jan 31 17:07:03 crc kubenswrapper[4769]: I0131 17:07:03.709067 4769 scope.go:117] "RemoveContainer" containerID="38c57a6952923d6f8bb33f11c84f339256f80eb4733efa1a6c4093e5a7b785ad" Jan 31 17:07:03 crc kubenswrapper[4769]: E0131 17:07:03.709489 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 20s restarting failed container=container-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:07:03 crc kubenswrapper[4769]: I0131 17:07:03.745537 4769 scope.go:117] "RemoveContainer" containerID="b75ca78e3e732a2fda632566c5f33677ad5d96a1a4ba40cb2b7ce0b76d12a238" Jan 31 17:07:04 crc kubenswrapper[4769]: I0131 17:07:04.584344 4769 generic.go:334] "Generic (PLEG): container finished" podID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" containerID="09689c5c21340034799ebdace944e8395f1cdef0c983425743265006dec3c64a" exitCode=1 Jan 31 17:07:04 crc kubenswrapper[4769]: I0131 17:07:04.584404 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerDied","Data":"09689c5c21340034799ebdace944e8395f1cdef0c983425743265006dec3c64a"} Jan 31 17:07:04 crc kubenswrapper[4769]: I0131 17:07:04.584828 4769 scope.go:117] "RemoveContainer" containerID="c9981a7453c86900fac5bdb52476bf12ed1bf8dbe687089661db9e97e0a60702" Jan 31 17:07:04 crc kubenswrapper[4769]: I0131 17:07:04.585264 4769 scope.go:117] "RemoveContainer" containerID="47e6ef44f4cafd40ddd8067c4d2a122ba8837ab89b1f0635dba6d2dfbef60b1e" Jan 31 17:07:04 crc kubenswrapper[4769]: I0131 17:07:04.585520 4769 scope.go:117] "RemoveContainer" containerID="adb6a0f05e01e1b53e6ca33955c1f3c7d460cb477e5a427288db2b14371b43af" Jan 31 17:07:04 crc kubenswrapper[4769]: I0131 17:07:04.585657 4769 scope.go:117] "RemoveContainer" 
containerID="09689c5c21340034799ebdace944e8395f1cdef0c983425743265006dec3c64a" Jan 31 17:07:04 crc kubenswrapper[4769]: E0131 17:07:04.586082 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 10s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:07:04 crc kubenswrapper[4769]: I0131 17:07:04.600982 4769 scope.go:117] "RemoveContainer" containerID="1221c131fec1480f788da57c2c749f36a4163f4810988c86a60d77f1a76a4daa" Jan 31 17:07:04 crc kubenswrapper[4769]: I0131 17:07:04.601110 4769 scope.go:117] "RemoveContainer" containerID="0639325eada4d14a9892a9886fff82a11fc68ec1c4bc1eaf0e7566636606ed76" Jan 31 17:07:04 crc kubenswrapper[4769]: I0131 17:07:04.601307 4769 scope.go:117] "RemoveContainer" containerID="9708576e2af8a1e9ec28439a6b3221f6aea24679a4eb485f36255e82496b8e39" Jan 31 17:07:04 crc kubenswrapper[4769]: E0131 17:07:04.601807 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 10s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:07:05 crc kubenswrapper[4769]: I0131 17:07:05.620802 4769 scope.go:117] "RemoveContainer" containerID="47e6ef44f4cafd40ddd8067c4d2a122ba8837ab89b1f0635dba6d2dfbef60b1e" Jan 31 17:07:05 crc kubenswrapper[4769]: I0131 17:07:05.620965 4769 scope.go:117] "RemoveContainer" containerID="adb6a0f05e01e1b53e6ca33955c1f3c7d460cb477e5a427288db2b14371b43af" Jan 31 17:07:05 crc kubenswrapper[4769]: I0131 17:07:05.621146 4769 scope.go:117] "RemoveContainer" containerID="09689c5c21340034799ebdace944e8395f1cdef0c983425743265006dec3c64a" Jan 31 17:07:05 crc kubenswrapper[4769]: E0131 17:07:05.621831 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 10s 
restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:07:06 crc kubenswrapper[4769]: I0131 17:07:06.708249 4769 scope.go:117] "RemoveContainer" containerID="0fa55dcb76bc686943e5b7135025fa4b96b4f5ad436cce5f2cb11ea5e727ea37" Jan 31 17:07:06 crc kubenswrapper[4769]: I0131 17:07:06.708682 4769 scope.go:117] "RemoveContainer" containerID="3646ad1992c210e8bf551ccff3f4a82477556311e79cf13f4103fcc1edb9f1a4" Jan 31 17:07:06 crc kubenswrapper[4769]: E0131 17:07:06.709095 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:07:14 crc kubenswrapper[4769]: I0131 17:07:14.708766 4769 scope.go:117] "RemoveContainer" containerID="7737b4ea15d66ecd3a1f0162917edc283af061057e4e073e1b0c4d285ababc8a" Jan 31 17:07:14 crc kubenswrapper[4769]: I0131 17:07:14.709434 4769 scope.go:117] "RemoveContainer" containerID="62946bc30fa0ff5c4e547df146dd6e5243ff6ef16c16250218e2568d4be097ae" Jan 31 17:07:14 crc kubenswrapper[4769]: I0131 17:07:14.709465 4769 scope.go:117] "RemoveContainer" containerID="e8cf1205aa6ab18a9ce5183ca3000df44bf01a5ba2f0fab8a4ae50c0f9e84c86" Jan 31 17:07:14 crc kubenswrapper[4769]: I0131 17:07:14.709561 4769 scope.go:117] "RemoveContainer" containerID="3763e21c60b42d0d544a22b54f711243a20cd6d855d0b802a15c29ee20e6ebb4" Jan 31 17:07:14 crc kubenswrapper[4769]: I0131 17:07:14.709608 4769 scope.go:117] "RemoveContainer" containerID="38c57a6952923d6f8bb33f11c84f339256f80eb4733efa1a6c4093e5a7b785ad" Jan 31 17:07:15 crc kubenswrapper[4769]: E0131 17:07:15.063782 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:07:15 crc kubenswrapper[4769]: I0131 17:07:15.731050 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerStarted","Data":"87ddb4a86deaa8f5409259ffc6fa63ac90f4ca6f5dc5bedd2323d2e947621311"} Jan 31 17:07:15 crc 
kubenswrapper[4769]: I0131 17:07:15.734839 4769 scope.go:117] "RemoveContainer" containerID="7737b4ea15d66ecd3a1f0162917edc283af061057e4e073e1b0c4d285ababc8a" Jan 31 17:07:15 crc kubenswrapper[4769]: I0131 17:07:15.735008 4769 scope.go:117] "RemoveContainer" containerID="62946bc30fa0ff5c4e547df146dd6e5243ff6ef16c16250218e2568d4be097ae" Jan 31 17:07:15 crc kubenswrapper[4769]: I0131 17:07:15.735256 4769 scope.go:117] "RemoveContainer" containerID="3763e21c60b42d0d544a22b54f711243a20cd6d855d0b802a15c29ee20e6ebb4" Jan 31 17:07:15 crc kubenswrapper[4769]: I0131 17:07:15.735940 4769 scope.go:117] "RemoveContainer" containerID="38c57a6952923d6f8bb33f11c84f339256f80eb4733efa1a6c4093e5a7b785ad" Jan 31 17:07:15 crc kubenswrapper[4769]: E0131 17:07:15.736707 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:07:17 crc kubenswrapper[4769]: I0131 17:07:17.708729 4769 scope.go:117] "RemoveContainer" containerID="0fa55dcb76bc686943e5b7135025fa4b96b4f5ad436cce5f2cb11ea5e727ea37" Jan 31 17:07:17 crc kubenswrapper[4769]: I0131 17:07:17.709012 4769 scope.go:117] "RemoveContainer" containerID="3646ad1992c210e8bf551ccff3f4a82477556311e79cf13f4103fcc1edb9f1a4" Jan 31 17:07:17 crc kubenswrapper[4769]: E0131 17:07:17.709288 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:07:18 crc kubenswrapper[4769]: I0131 17:07:18.708305 4769 scope.go:117] "RemoveContainer" containerID="47e6ef44f4cafd40ddd8067c4d2a122ba8837ab89b1f0635dba6d2dfbef60b1e" Jan 31 17:07:18 crc kubenswrapper[4769]: I0131 17:07:18.708399 4769 scope.go:117] "RemoveContainer" containerID="adb6a0f05e01e1b53e6ca33955c1f3c7d460cb477e5a427288db2b14371b43af" Jan 31 17:07:18 crc kubenswrapper[4769]: I0131 17:07:18.708528 4769 scope.go:117] "RemoveContainer" containerID="1221c131fec1480f788da57c2c749f36a4163f4810988c86a60d77f1a76a4daa" Jan 31 17:07:18 crc kubenswrapper[4769]: I0131 17:07:18.708546 4769 scope.go:117] "RemoveContainer" containerID="09689c5c21340034799ebdace944e8395f1cdef0c983425743265006dec3c64a" Jan 31 17:07:18 crc 
kubenswrapper[4769]: I0131 17:07:18.708579 4769 scope.go:117] "RemoveContainer" containerID="0639325eada4d14a9892a9886fff82a11fc68ec1c4bc1eaf0e7566636606ed76" Jan 31 17:07:18 crc kubenswrapper[4769]: I0131 17:07:18.708659 4769 scope.go:117] "RemoveContainer" containerID="9708576e2af8a1e9ec28439a6b3221f6aea24679a4eb485f36255e82496b8e39" Jan 31 17:07:19 crc kubenswrapper[4769]: E0131 17:07:19.740134 4769 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda2e45b15_42ba_44b4_91c5_fa5bc64d7a43.slice/crio-conmon-cdddea1255130b6d433b5772fc3ab90fffa4a349443b815199cfa21fb0d3c0c9.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda2e45b15_42ba_44b4_91c5_fa5bc64d7a43.slice/crio-cdddea1255130b6d433b5772fc3ab90fffa4a349443b815199cfa21fb0d3c0c9.scope\": RecentStats: unable to find data in memory cache]" Jan 31 17:07:19 crc kubenswrapper[4769]: I0131 17:07:19.807014 4769 generic.go:334] "Generic (PLEG): container finished" podID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" containerID="cdddea1255130b6d433b5772fc3ab90fffa4a349443b815199cfa21fb0d3c0c9" exitCode=1 Jan 31 17:07:19 crc kubenswrapper[4769]: I0131 17:07:19.807064 4769 generic.go:334] "Generic (PLEG): container finished" podID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" containerID="10fc7829689af9b54611146136ae33d198a5830361eeacbb060da6232a97ef4a" exitCode=1 Jan 31 17:07:19 crc kubenswrapper[4769]: I0131 17:07:19.807101 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerStarted","Data":"87b2d02e3493d169139c467b86907cd13cd9155dd3e06a912d95b929c6a3ecd5"} Jan 31 17:07:19 crc kubenswrapper[4769]: I0131 17:07:19.807125 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerDied","Data":"cdddea1255130b6d433b5772fc3ab90fffa4a349443b815199cfa21fb0d3c0c9"} Jan 31 17:07:19 crc kubenswrapper[4769]: I0131 17:07:19.807138 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerDied","Data":"10fc7829689af9b54611146136ae33d198a5830361eeacbb060da6232a97ef4a"} Jan 31 17:07:19 crc kubenswrapper[4769]: I0131 17:07:19.807153 4769 scope.go:117] "RemoveContainer" containerID="adb6a0f05e01e1b53e6ca33955c1f3c7d460cb477e5a427288db2b14371b43af" Jan 31 17:07:19 crc kubenswrapper[4769]: I0131 17:07:19.807670 4769 scope.go:117] "RemoveContainer" containerID="10fc7829689af9b54611146136ae33d198a5830361eeacbb060da6232a97ef4a" Jan 31 17:07:19 crc kubenswrapper[4769]: I0131 17:07:19.807740 4769 scope.go:117] "RemoveContainer" containerID="cdddea1255130b6d433b5772fc3ab90fffa4a349443b815199cfa21fb0d3c0c9" Jan 31 17:07:19 crc kubenswrapper[4769]: E0131 17:07:19.808093 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" 
pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:07:19 crc kubenswrapper[4769]: I0131 17:07:19.825346 4769 generic.go:334] "Generic (PLEG): container finished" podID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" containerID="70ca147ec5dcb174928622dd5f4563db68c9b1258abd67b67ad3e8d862224e77" exitCode=1 Jan 31 17:07:19 crc kubenswrapper[4769]: I0131 17:07:19.825391 4769 generic.go:334] "Generic (PLEG): container finished" podID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" containerID="0306fa19107480df3101d7f01ee854238261e27bead9c88492e9d23cb0d3b610" exitCode=1 Jan 31 17:07:19 crc kubenswrapper[4769]: I0131 17:07:19.825412 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerStarted","Data":"c32f3e8e035b90c4288a4def3dd2f37122122f1a79405ed519937b10725aa99d"} Jan 31 17:07:19 crc kubenswrapper[4769]: I0131 17:07:19.825449 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerDied","Data":"70ca147ec5dcb174928622dd5f4563db68c9b1258abd67b67ad3e8d862224e77"} Jan 31 17:07:19 crc kubenswrapper[4769]: I0131 17:07:19.825462 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerDied","Data":"0306fa19107480df3101d7f01ee854238261e27bead9c88492e9d23cb0d3b610"} Jan 31 17:07:19 crc kubenswrapper[4769]: I0131 17:07:19.826188 4769 scope.go:117] "RemoveContainer" containerID="0306fa19107480df3101d7f01ee854238261e27bead9c88492e9d23cb0d3b610" Jan 31 17:07:19 crc kubenswrapper[4769]: I0131 17:07:19.826255 4769 scope.go:117] "RemoveContainer" containerID="70ca147ec5dcb174928622dd5f4563db68c9b1258abd67b67ad3e8d862224e77" Jan 31 17:07:19 crc kubenswrapper[4769]: E0131 17:07:19.826594 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:07:19 crc kubenswrapper[4769]: I0131 17:07:19.871332 4769 scope.go:117] "RemoveContainer" containerID="47e6ef44f4cafd40ddd8067c4d2a122ba8837ab89b1f0635dba6d2dfbef60b1e" Jan 31 17:07:19 crc kubenswrapper[4769]: I0131 17:07:19.910379 4769 scope.go:117] "RemoveContainer" containerID="0639325eada4d14a9892a9886fff82a11fc68ec1c4bc1eaf0e7566636606ed76" Jan 31 17:07:19 crc kubenswrapper[4769]: I0131 17:07:19.946947 4769 scope.go:117] "RemoveContainer" containerID="1221c131fec1480f788da57c2c749f36a4163f4810988c86a60d77f1a76a4daa" Jan 31 17:07:20 crc kubenswrapper[4769]: I0131 17:07:20.682113 4769 patch_prober.go:28] interesting pod/machine-config-daemon-4bqbm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 31 17:07:20 crc kubenswrapper[4769]: I0131 17:07:20.682244 4769 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 31 17:07:20 crc kubenswrapper[4769]: I0131 17:07:20.682331 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" Jan 31 17:07:20 crc kubenswrapper[4769]: I0131 17:07:20.683667 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"fb744d21157f411015c8cd1651de19adb19d19b1ce3580e5574de4b8b82236f1"} pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 31 17:07:20 crc kubenswrapper[4769]: I0131 17:07:20.683802 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" containerID="cri-o://fb744d21157f411015c8cd1651de19adb19d19b1ce3580e5574de4b8b82236f1" gracePeriod=600 Jan 31 17:07:20 crc kubenswrapper[4769]: E0131 17:07:20.812036 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:07:20 crc kubenswrapper[4769]: I0131 17:07:20.847960 4769 generic.go:334] "Generic (PLEG): container finished" podID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" containerID="c32f3e8e035b90c4288a4def3dd2f37122122f1a79405ed519937b10725aa99d" exitCode=1 Jan 31 17:07:20 crc kubenswrapper[4769]: I0131 17:07:20.848068 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerDied","Data":"c32f3e8e035b90c4288a4def3dd2f37122122f1a79405ed519937b10725aa99d"} Jan 31 17:07:20 crc kubenswrapper[4769]: I0131 17:07:20.848121 4769 scope.go:117] "RemoveContainer" containerID="9708576e2af8a1e9ec28439a6b3221f6aea24679a4eb485f36255e82496b8e39" Jan 31 17:07:20 crc kubenswrapper[4769]: I0131 17:07:20.849613 4769 scope.go:117] "RemoveContainer" containerID="0306fa19107480df3101d7f01ee854238261e27bead9c88492e9d23cb0d3b610" Jan 31 17:07:20 crc kubenswrapper[4769]: I0131 17:07:20.850417 4769 scope.go:117] "RemoveContainer" containerID="70ca147ec5dcb174928622dd5f4563db68c9b1258abd67b67ad3e8d862224e77" Jan 31 17:07:20 crc kubenswrapper[4769]: I0131 17:07:20.850835 4769 scope.go:117] "RemoveContainer" containerID="c32f3e8e035b90c4288a4def3dd2f37122122f1a79405ed519937b10725aa99d" Jan 31 17:07:20 crc kubenswrapper[4769]: E0131 17:07:20.851746 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 20s restarting failed 
container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 20s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:07:20 crc kubenswrapper[4769]: I0131 17:07:20.854038 4769 generic.go:334] "Generic (PLEG): container finished" podID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerID="fb744d21157f411015c8cd1651de19adb19d19b1ce3580e5574de4b8b82236f1" exitCode=0 Jan 31 17:07:20 crc kubenswrapper[4769]: I0131 17:07:20.854154 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" event={"ID":"1d352f75-43f7-4b8c-867e-cfb17bbbe011","Type":"ContainerDied","Data":"fb744d21157f411015c8cd1651de19adb19d19b1ce3580e5574de4b8b82236f1"} Jan 31 17:07:20 crc kubenswrapper[4769]: I0131 17:07:20.855045 4769 scope.go:117] "RemoveContainer" containerID="fb744d21157f411015c8cd1651de19adb19d19b1ce3580e5574de4b8b82236f1" Jan 31 17:07:20 crc kubenswrapper[4769]: E0131 17:07:20.855576 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:07:20 crc kubenswrapper[4769]: I0131 17:07:20.882087 4769 generic.go:334] "Generic (PLEG): container finished" podID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" containerID="87b2d02e3493d169139c467b86907cd13cd9155dd3e06a912d95b929c6a3ecd5" exitCode=1 Jan 31 17:07:20 crc kubenswrapper[4769]: I0131 17:07:20.882156 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerDied","Data":"87b2d02e3493d169139c467b86907cd13cd9155dd3e06a912d95b929c6a3ecd5"} Jan 31 17:07:20 crc kubenswrapper[4769]: I0131 17:07:20.884165 4769 scope.go:117] "RemoveContainer" containerID="10fc7829689af9b54611146136ae33d198a5830361eeacbb060da6232a97ef4a" Jan 31 17:07:20 crc kubenswrapper[4769]: I0131 17:07:20.884232 4769 scope.go:117] "RemoveContainer" containerID="cdddea1255130b6d433b5772fc3ab90fffa4a349443b815199cfa21fb0d3c0c9" Jan 31 17:07:20 crc kubenswrapper[4769]: I0131 17:07:20.884351 4769 scope.go:117] "RemoveContainer" containerID="87b2d02e3493d169139c467b86907cd13cd9155dd3e06a912d95b929c6a3ecd5" Jan 31 17:07:20 crc kubenswrapper[4769]: E0131 17:07:20.884648 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 20s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" 
pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:07:20 crc kubenswrapper[4769]: I0131 17:07:20.912277 4769 scope.go:117] "RemoveContainer" containerID="a1b936b54fa524976a558efe701fe594aef4952a8c4551ef3c20c1b46797e85b" Jan 31 17:07:20 crc kubenswrapper[4769]: I0131 17:07:20.952616 4769 scope.go:117] "RemoveContainer" containerID="09689c5c21340034799ebdace944e8395f1cdef0c983425743265006dec3c64a" Jan 31 17:07:22 crc kubenswrapper[4769]: I0131 17:07:22.440455 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices\") pod \"swift-ring-rebalance-2sjs2\" (UID: \"54c0116b-a027-4f11-8b6b-aa00778f1acb\") " pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 17:07:22 crc kubenswrapper[4769]: E0131 17:07:22.440631 4769 configmap.go:193] Couldn't get configMap swift-kuttl-tests/swift-ring-config-data: configmap "swift-ring-config-data" not found Jan 31 17:07:22 crc kubenswrapper[4769]: E0131 17:07:22.440734 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices podName:54c0116b-a027-4f11-8b6b-aa00778f1acb nodeName:}" failed. No retries permitted until 2026-01-31 17:09:24.440706986 +0000 UTC m=+2412.514875665 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices") pod "swift-ring-rebalance-2sjs2" (UID: "54c0116b-a027-4f11-8b6b-aa00778f1acb") : configmap "swift-ring-config-data" not found Jan 31 17:07:28 crc kubenswrapper[4769]: E0131 17:07:28.569812 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ring-data-devices], unattached volumes=[], failed to process volumes=[]: context deadline exceeded" pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" podUID="54c0116b-a027-4f11-8b6b-aa00778f1acb" Jan 31 17:07:28 crc kubenswrapper[4769]: I0131 17:07:28.709559 4769 scope.go:117] "RemoveContainer" containerID="7737b4ea15d66ecd3a1f0162917edc283af061057e4e073e1b0c4d285ababc8a" Jan 31 17:07:28 crc kubenswrapper[4769]: I0131 17:07:28.710087 4769 scope.go:117] "RemoveContainer" containerID="62946bc30fa0ff5c4e547df146dd6e5243ff6ef16c16250218e2568d4be097ae" Jan 31 17:07:28 crc kubenswrapper[4769]: I0131 17:07:28.710285 4769 scope.go:117] "RemoveContainer" containerID="3763e21c60b42d0d544a22b54f711243a20cd6d855d0b802a15c29ee20e6ebb4" Jan 31 17:07:28 crc kubenswrapper[4769]: I0131 17:07:28.710368 4769 scope.go:117] "RemoveContainer" containerID="38c57a6952923d6f8bb33f11c84f339256f80eb4733efa1a6c4093e5a7b785ad" Jan 31 17:07:28 crc kubenswrapper[4769]: E0131 17:07:28.710869 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed 
to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:07:28 crc kubenswrapper[4769]: I0131 17:07:28.998055 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 17:07:29 crc kubenswrapper[4769]: I0131 17:07:29.708016 4769 scope.go:117] "RemoveContainer" containerID="0fa55dcb76bc686943e5b7135025fa4b96b4f5ad436cce5f2cb11ea5e727ea37" Jan 31 17:07:29 crc kubenswrapper[4769]: I0131 17:07:29.708039 4769 scope.go:117] "RemoveContainer" containerID="3646ad1992c210e8bf551ccff3f4a82477556311e79cf13f4103fcc1edb9f1a4" Jan 31 17:07:29 crc kubenswrapper[4769]: E0131 17:07:29.708221 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:07:32 crc kubenswrapper[4769]: I0131 17:07:32.714652 4769 scope.go:117] "RemoveContainer" containerID="0306fa19107480df3101d7f01ee854238261e27bead9c88492e9d23cb0d3b610" Jan 31 17:07:32 crc kubenswrapper[4769]: I0131 17:07:32.715021 4769 scope.go:117] "RemoveContainer" containerID="70ca147ec5dcb174928622dd5f4563db68c9b1258abd67b67ad3e8d862224e77" Jan 31 17:07:32 crc kubenswrapper[4769]: I0131 17:07:32.715109 4769 scope.go:117] "RemoveContainer" containerID="c32f3e8e035b90c4288a4def3dd2f37122122f1a79405ed519937b10725aa99d" Jan 31 17:07:32 crc kubenswrapper[4769]: E0131 17:07:32.715375 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 20s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:07:33 crc kubenswrapper[4769]: I0131 17:07:33.708776 4769 scope.go:117] "RemoveContainer" containerID="fb744d21157f411015c8cd1651de19adb19d19b1ce3580e5574de4b8b82236f1" Jan 31 17:07:33 crc kubenswrapper[4769]: E0131 17:07:33.709159 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" 
podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:07:35 crc kubenswrapper[4769]: I0131 17:07:35.709811 4769 scope.go:117] "RemoveContainer" containerID="10fc7829689af9b54611146136ae33d198a5830361eeacbb060da6232a97ef4a" Jan 31 17:07:35 crc kubenswrapper[4769]: I0131 17:07:35.712534 4769 scope.go:117] "RemoveContainer" containerID="cdddea1255130b6d433b5772fc3ab90fffa4a349443b815199cfa21fb0d3c0c9" Jan 31 17:07:35 crc kubenswrapper[4769]: I0131 17:07:35.712883 4769 scope.go:117] "RemoveContainer" containerID="87b2d02e3493d169139c467b86907cd13cd9155dd3e06a912d95b929c6a3ecd5" Jan 31 17:07:35 crc kubenswrapper[4769]: E0131 17:07:35.713587 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 20s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:07:38 crc kubenswrapper[4769]: I0131 17:07:38.093531 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="2b8f74cf7a78134f2f82659b1526844f5e7f23919cdce0a11d91c8d87551dbc7" exitCode=1 Jan 31 17:07:38 crc kubenswrapper[4769]: I0131 17:07:38.093584 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"2b8f74cf7a78134f2f82659b1526844f5e7f23919cdce0a11d91c8d87551dbc7"} Jan 31 17:07:38 crc kubenswrapper[4769]: I0131 17:07:38.093622 4769 scope.go:117] "RemoveContainer" containerID="84c2f4c49c1c9b12fa8714fd5a724335cb0fbeea29abb91954d762942d2a821b" Jan 31 17:07:38 crc kubenswrapper[4769]: I0131 17:07:38.094422 4769 scope.go:117] "RemoveContainer" containerID="7737b4ea15d66ecd3a1f0162917edc283af061057e4e073e1b0c4d285ababc8a" Jan 31 17:07:38 crc kubenswrapper[4769]: I0131 17:07:38.094528 4769 scope.go:117] "RemoveContainer" containerID="62946bc30fa0ff5c4e547df146dd6e5243ff6ef16c16250218e2568d4be097ae" Jan 31 17:07:38 crc kubenswrapper[4769]: I0131 17:07:38.094640 4769 scope.go:117] "RemoveContainer" containerID="2b8f74cf7a78134f2f82659b1526844f5e7f23919cdce0a11d91c8d87551dbc7" Jan 31 17:07:38 crc kubenswrapper[4769]: I0131 17:07:38.094671 4769 scope.go:117] "RemoveContainer" containerID="3763e21c60b42d0d544a22b54f711243a20cd6d855d0b802a15c29ee20e6ebb4" Jan 31 17:07:38 crc kubenswrapper[4769]: I0131 17:07:38.094717 4769 scope.go:117] "RemoveContainer" containerID="38c57a6952923d6f8bb33f11c84f339256f80eb4733efa1a6c4093e5a7b785ad" Jan 31 17:07:38 crc kubenswrapper[4769]: E0131 17:07:38.095066 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 20s restarting failed container=object-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:07:43 crc kubenswrapper[4769]: I0131 17:07:43.708122 4769 scope.go:117] "RemoveContainer" containerID="0fa55dcb76bc686943e5b7135025fa4b96b4f5ad436cce5f2cb11ea5e727ea37" Jan 31 17:07:43 crc kubenswrapper[4769]: I0131 17:07:43.709657 4769 scope.go:117] "RemoveContainer" containerID="3646ad1992c210e8bf551ccff3f4a82477556311e79cf13f4103fcc1edb9f1a4" Jan 31 17:07:43 crc kubenswrapper[4769]: E0131 17:07:43.710539 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:07:44 crc kubenswrapper[4769]: I0131 17:07:44.708464 4769 scope.go:117] "RemoveContainer" containerID="0306fa19107480df3101d7f01ee854238261e27bead9c88492e9d23cb0d3b610" Jan 31 17:07:44 crc kubenswrapper[4769]: I0131 17:07:44.708764 4769 scope.go:117] "RemoveContainer" containerID="70ca147ec5dcb174928622dd5f4563db68c9b1258abd67b67ad3e8d862224e77" Jan 31 17:07:44 crc kubenswrapper[4769]: I0131 17:07:44.708846 4769 scope.go:117] "RemoveContainer" containerID="c32f3e8e035b90c4288a4def3dd2f37122122f1a79405ed519937b10725aa99d" Jan 31 17:07:45 crc kubenswrapper[4769]: I0131 17:07:45.158436 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerStarted","Data":"26628fcb213930c72a45e4d57f450e99405e280e2b7aa0218676e31283f92ff4"} Jan 31 17:07:45 crc kubenswrapper[4769]: I0131 17:07:45.158721 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerStarted","Data":"e634ac6f1483117e77d804bf25e388b4736b898b279fe70d09548e308873ff08"} Jan 31 17:07:46 crc kubenswrapper[4769]: I0131 17:07:46.177658 4769 generic.go:334] "Generic (PLEG): container finished" podID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" containerID="26628fcb213930c72a45e4d57f450e99405e280e2b7aa0218676e31283f92ff4" exitCode=1 Jan 31 17:07:46 crc kubenswrapper[4769]: I0131 17:07:46.177714 4769 generic.go:334] "Generic (PLEG): container finished" podID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" containerID="e634ac6f1483117e77d804bf25e388b4736b898b279fe70d09548e308873ff08" exitCode=1 Jan 31 17:07:46 crc 
kubenswrapper[4769]: I0131 17:07:46.177731 4769 generic.go:334] "Generic (PLEG): container finished" podID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" containerID="9b3e25d95706cf3a7214027924c5f4cc2b3702400aed3f71f6bbdfefb5690065" exitCode=1 Jan 31 17:07:46 crc kubenswrapper[4769]: I0131 17:07:46.177762 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerDied","Data":"26628fcb213930c72a45e4d57f450e99405e280e2b7aa0218676e31283f92ff4"} Jan 31 17:07:46 crc kubenswrapper[4769]: I0131 17:07:46.177803 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerDied","Data":"e634ac6f1483117e77d804bf25e388b4736b898b279fe70d09548e308873ff08"} Jan 31 17:07:46 crc kubenswrapper[4769]: I0131 17:07:46.177828 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerDied","Data":"9b3e25d95706cf3a7214027924c5f4cc2b3702400aed3f71f6bbdfefb5690065"} Jan 31 17:07:46 crc kubenswrapper[4769]: I0131 17:07:46.177854 4769 scope.go:117] "RemoveContainer" containerID="70ca147ec5dcb174928622dd5f4563db68c9b1258abd67b67ad3e8d862224e77" Jan 31 17:07:46 crc kubenswrapper[4769]: I0131 17:07:46.178919 4769 scope.go:117] "RemoveContainer" containerID="e634ac6f1483117e77d804bf25e388b4736b898b279fe70d09548e308873ff08" Jan 31 17:07:46 crc kubenswrapper[4769]: I0131 17:07:46.179053 4769 scope.go:117] "RemoveContainer" containerID="26628fcb213930c72a45e4d57f450e99405e280e2b7aa0218676e31283f92ff4" Jan 31 17:07:46 crc kubenswrapper[4769]: I0131 17:07:46.179247 4769 scope.go:117] "RemoveContainer" containerID="9b3e25d95706cf3a7214027924c5f4cc2b3702400aed3f71f6bbdfefb5690065" Jan 31 17:07:46 crc kubenswrapper[4769]: E0131 17:07:46.179787 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 40s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:07:46 crc kubenswrapper[4769]: I0131 17:07:46.260852 4769 scope.go:117] "RemoveContainer" containerID="0306fa19107480df3101d7f01ee854238261e27bead9c88492e9d23cb0d3b610" Jan 31 17:07:46 crc kubenswrapper[4769]: I0131 17:07:46.306729 4769 scope.go:117] "RemoveContainer" containerID="c32f3e8e035b90c4288a4def3dd2f37122122f1a79405ed519937b10725aa99d" Jan 31 17:07:47 crc kubenswrapper[4769]: I0131 17:07:47.194666 4769 scope.go:117] "RemoveContainer" containerID="e634ac6f1483117e77d804bf25e388b4736b898b279fe70d09548e308873ff08" Jan 31 17:07:47 crc kubenswrapper[4769]: I0131 17:07:47.194958 4769 scope.go:117] "RemoveContainer" containerID="26628fcb213930c72a45e4d57f450e99405e280e2b7aa0218676e31283f92ff4" Jan 31 17:07:47 crc kubenswrapper[4769]: I0131 17:07:47.195055 4769 scope.go:117] 
"RemoveContainer" containerID="9b3e25d95706cf3a7214027924c5f4cc2b3702400aed3f71f6bbdfefb5690065" Jan 31 17:07:47 crc kubenswrapper[4769]: E0131 17:07:47.195341 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 40s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:07:47 crc kubenswrapper[4769]: I0131 17:07:47.708636 4769 scope.go:117] "RemoveContainer" containerID="fb744d21157f411015c8cd1651de19adb19d19b1ce3580e5574de4b8b82236f1" Jan 31 17:07:47 crc kubenswrapper[4769]: E0131 17:07:47.709247 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:07:47 crc kubenswrapper[4769]: I0131 17:07:47.710608 4769 scope.go:117] "RemoveContainer" containerID="10fc7829689af9b54611146136ae33d198a5830361eeacbb060da6232a97ef4a" Jan 31 17:07:47 crc kubenswrapper[4769]: I0131 17:07:47.710769 4769 scope.go:117] "RemoveContainer" containerID="cdddea1255130b6d433b5772fc3ab90fffa4a349443b815199cfa21fb0d3c0c9" Jan 31 17:07:47 crc kubenswrapper[4769]: I0131 17:07:47.711023 4769 scope.go:117] "RemoveContainer" containerID="87b2d02e3493d169139c467b86907cd13cd9155dd3e06a912d95b929c6a3ecd5" Jan 31 17:07:48 crc kubenswrapper[4769]: I0131 17:07:48.207437 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerStarted","Data":"e74e1b3a9c10ecf6d85141b9315328a9a56f4e496ce76e5f2965ca10dcd670f9"} Jan 31 17:07:48 crc kubenswrapper[4769]: I0131 17:07:48.207472 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerStarted","Data":"452bfc017f921450e0c56f4fbd086bb3301f3f433ac77013094801540d60ea1d"} Jan 31 17:07:49 crc kubenswrapper[4769]: I0131 17:07:49.225448 4769 generic.go:334] "Generic (PLEG): container finished" podID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" containerID="e74e1b3a9c10ecf6d85141b9315328a9a56f4e496ce76e5f2965ca10dcd670f9" exitCode=1 Jan 31 17:07:49 crc kubenswrapper[4769]: I0131 17:07:49.225903 4769 generic.go:334] "Generic (PLEG): container finished" podID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" containerID="452bfc017f921450e0c56f4fbd086bb3301f3f433ac77013094801540d60ea1d" exitCode=1 Jan 31 17:07:49 crc kubenswrapper[4769]: I0131 17:07:49.225934 4769 generic.go:334] "Generic (PLEG): container finished" podID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" containerID="4f9316b1a241851c0ac080b456ef9b8a2dfacab077b57921864740a4bfb67473" 
exitCode=1 Jan 31 17:07:49 crc kubenswrapper[4769]: I0131 17:07:49.225558 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerDied","Data":"e74e1b3a9c10ecf6d85141b9315328a9a56f4e496ce76e5f2965ca10dcd670f9"} Jan 31 17:07:49 crc kubenswrapper[4769]: I0131 17:07:49.226007 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerDied","Data":"452bfc017f921450e0c56f4fbd086bb3301f3f433ac77013094801540d60ea1d"} Jan 31 17:07:49 crc kubenswrapper[4769]: I0131 17:07:49.226036 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerDied","Data":"4f9316b1a241851c0ac080b456ef9b8a2dfacab077b57921864740a4bfb67473"} Jan 31 17:07:49 crc kubenswrapper[4769]: I0131 17:07:49.226066 4769 scope.go:117] "RemoveContainer" containerID="cdddea1255130b6d433b5772fc3ab90fffa4a349443b815199cfa21fb0d3c0c9" Jan 31 17:07:49 crc kubenswrapper[4769]: I0131 17:07:49.229286 4769 scope.go:117] "RemoveContainer" containerID="452bfc017f921450e0c56f4fbd086bb3301f3f433ac77013094801540d60ea1d" Jan 31 17:07:49 crc kubenswrapper[4769]: I0131 17:07:49.229418 4769 scope.go:117] "RemoveContainer" containerID="e74e1b3a9c10ecf6d85141b9315328a9a56f4e496ce76e5f2965ca10dcd670f9" Jan 31 17:07:49 crc kubenswrapper[4769]: I0131 17:07:49.229642 4769 scope.go:117] "RemoveContainer" containerID="4f9316b1a241851c0ac080b456ef9b8a2dfacab077b57921864740a4bfb67473" Jan 31 17:07:49 crc kubenswrapper[4769]: E0131 17:07:49.230122 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 40s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:07:49 crc kubenswrapper[4769]: I0131 17:07:49.286821 4769 scope.go:117] "RemoveContainer" containerID="10fc7829689af9b54611146136ae33d198a5830361eeacbb060da6232a97ef4a" Jan 31 17:07:49 crc kubenswrapper[4769]: I0131 17:07:49.337889 4769 scope.go:117] "RemoveContainer" containerID="87b2d02e3493d169139c467b86907cd13cd9155dd3e06a912d95b929c6a3ecd5" Jan 31 17:07:50 crc kubenswrapper[4769]: I0131 17:07:50.255871 4769 scope.go:117] "RemoveContainer" containerID="452bfc017f921450e0c56f4fbd086bb3301f3f433ac77013094801540d60ea1d" Jan 31 17:07:50 crc kubenswrapper[4769]: I0131 17:07:50.255963 4769 scope.go:117] "RemoveContainer" containerID="e74e1b3a9c10ecf6d85141b9315328a9a56f4e496ce76e5f2965ca10dcd670f9" Jan 31 17:07:50 crc kubenswrapper[4769]: I0131 17:07:50.256076 4769 scope.go:117] "RemoveContainer" containerID="4f9316b1a241851c0ac080b456ef9b8a2dfacab077b57921864740a4bfb67473" Jan 31 17:07:50 crc kubenswrapper[4769]: E0131 17:07:50.256474 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to 
\"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 40s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:07:50 crc kubenswrapper[4769]: I0131 17:07:50.709606 4769 scope.go:117] "RemoveContainer" containerID="7737b4ea15d66ecd3a1f0162917edc283af061057e4e073e1b0c4d285ababc8a" Jan 31 17:07:50 crc kubenswrapper[4769]: I0131 17:07:50.711208 4769 scope.go:117] "RemoveContainer" containerID="62946bc30fa0ff5c4e547df146dd6e5243ff6ef16c16250218e2568d4be097ae" Jan 31 17:07:50 crc kubenswrapper[4769]: I0131 17:07:50.711385 4769 scope.go:117] "RemoveContainer" containerID="2b8f74cf7a78134f2f82659b1526844f5e7f23919cdce0a11d91c8d87551dbc7" Jan 31 17:07:50 crc kubenswrapper[4769]: I0131 17:07:50.711406 4769 scope.go:117] "RemoveContainer" containerID="3763e21c60b42d0d544a22b54f711243a20cd6d855d0b802a15c29ee20e6ebb4" Jan 31 17:07:50 crc kubenswrapper[4769]: I0131 17:07:50.711477 4769 scope.go:117] "RemoveContainer" containerID="38c57a6952923d6f8bb33f11c84f339256f80eb4733efa1a6c4093e5a7b785ad" Jan 31 17:07:50 crc kubenswrapper[4769]: E0131 17:07:50.713843 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 20s restarting failed container=object-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:07:58 crc kubenswrapper[4769]: I0131 17:07:58.708384 4769 scope.go:117] "RemoveContainer" containerID="0fa55dcb76bc686943e5b7135025fa4b96b4f5ad436cce5f2cb11ea5e727ea37" Jan 31 17:07:58 crc kubenswrapper[4769]: I0131 17:07:58.708917 4769 scope.go:117] "RemoveContainer" containerID="3646ad1992c210e8bf551ccff3f4a82477556311e79cf13f4103fcc1edb9f1a4" Jan 31 17:07:58 crc kubenswrapper[4769]: E0131 17:07:58.940643 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd 
pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:07:59 crc kubenswrapper[4769]: I0131 17:07:59.347534 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerStarted","Data":"50b061844f8b9430593d59652fe6b1c117d7129dd574eb1fbaefa1b4c69e0e76"} Jan 31 17:07:59 crc kubenswrapper[4769]: I0131 17:07:59.348168 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 17:07:59 crc kubenswrapper[4769]: I0131 17:07:59.348557 4769 scope.go:117] "RemoveContainer" containerID="0fa55dcb76bc686943e5b7135025fa4b96b4f5ad436cce5f2cb11ea5e727ea37" Jan 31 17:07:59 crc kubenswrapper[4769]: E0131 17:07:59.348887 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:07:59 crc kubenswrapper[4769]: I0131 17:07:59.707909 4769 scope.go:117] "RemoveContainer" containerID="fb744d21157f411015c8cd1651de19adb19d19b1ce3580e5574de4b8b82236f1" Jan 31 17:07:59 crc kubenswrapper[4769]: E0131 17:07:59.708291 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:08:00 crc kubenswrapper[4769]: I0131 17:08:00.362229 4769 generic.go:334] "Generic (PLEG): container finished" podID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerID="50b061844f8b9430593d59652fe6b1c117d7129dd574eb1fbaefa1b4c69e0e76" exitCode=1 Jan 31 17:08:00 crc kubenswrapper[4769]: I0131 17:08:00.362295 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerDied","Data":"50b061844f8b9430593d59652fe6b1c117d7129dd574eb1fbaefa1b4c69e0e76"} Jan 31 17:08:00 crc kubenswrapper[4769]: I0131 17:08:00.362339 4769 scope.go:117] "RemoveContainer" containerID="3646ad1992c210e8bf551ccff3f4a82477556311e79cf13f4103fcc1edb9f1a4" Jan 31 17:08:00 crc kubenswrapper[4769]: I0131 17:08:00.362977 4769 scope.go:117] "RemoveContainer" containerID="0fa55dcb76bc686943e5b7135025fa4b96b4f5ad436cce5f2cb11ea5e727ea37" Jan 31 17:08:00 crc kubenswrapper[4769]: I0131 17:08:00.363002 4769 scope.go:117] "RemoveContainer" containerID="50b061844f8b9430593d59652fe6b1c117d7129dd574eb1fbaefa1b4c69e0e76" Jan 31 17:08:00 crc kubenswrapper[4769]: E0131 17:08:00.363305 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting 
failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:08:00 crc kubenswrapper[4769]: I0131 17:08:00.709221 4769 scope.go:117] "RemoveContainer" containerID="e634ac6f1483117e77d804bf25e388b4736b898b279fe70d09548e308873ff08" Jan 31 17:08:00 crc kubenswrapper[4769]: I0131 17:08:00.709351 4769 scope.go:117] "RemoveContainer" containerID="26628fcb213930c72a45e4d57f450e99405e280e2b7aa0218676e31283f92ff4" Jan 31 17:08:00 crc kubenswrapper[4769]: I0131 17:08:00.709558 4769 scope.go:117] "RemoveContainer" containerID="9b3e25d95706cf3a7214027924c5f4cc2b3702400aed3f71f6bbdfefb5690065" Jan 31 17:08:00 crc kubenswrapper[4769]: E0131 17:08:00.710118 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 40s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:08:01 crc kubenswrapper[4769]: I0131 17:08:01.377606 4769 scope.go:117] "RemoveContainer" containerID="0fa55dcb76bc686943e5b7135025fa4b96b4f5ad436cce5f2cb11ea5e727ea37" Jan 31 17:08:01 crc kubenswrapper[4769]: I0131 17:08:01.377648 4769 scope.go:117] "RemoveContainer" containerID="50b061844f8b9430593d59652fe6b1c117d7129dd574eb1fbaefa1b4c69e0e76" Jan 31 17:08:01 crc kubenswrapper[4769]: E0131 17:08:01.378023 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:08:02 crc kubenswrapper[4769]: I0131 17:08:02.645267 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 17:08:02 crc kubenswrapper[4769]: I0131 17:08:02.646151 4769 scope.go:117] "RemoveContainer" containerID="0fa55dcb76bc686943e5b7135025fa4b96b4f5ad436cce5f2cb11ea5e727ea37" Jan 31 17:08:02 crc kubenswrapper[4769]: I0131 17:08:02.646185 4769 scope.go:117] "RemoveContainer" containerID="50b061844f8b9430593d59652fe6b1c117d7129dd574eb1fbaefa1b4c69e0e76" Jan 31 17:08:02 crc kubenswrapper[4769]: E0131 17:08:02.646460 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed 
to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:08:05 crc kubenswrapper[4769]: I0131 17:08:05.709723 4769 scope.go:117] "RemoveContainer" containerID="7737b4ea15d66ecd3a1f0162917edc283af061057e4e073e1b0c4d285ababc8a" Jan 31 17:08:05 crc kubenswrapper[4769]: I0131 17:08:05.710176 4769 scope.go:117] "RemoveContainer" containerID="62946bc30fa0ff5c4e547df146dd6e5243ff6ef16c16250218e2568d4be097ae" Jan 31 17:08:05 crc kubenswrapper[4769]: I0131 17:08:05.710332 4769 scope.go:117] "RemoveContainer" containerID="2b8f74cf7a78134f2f82659b1526844f5e7f23919cdce0a11d91c8d87551dbc7" Jan 31 17:08:05 crc kubenswrapper[4769]: I0131 17:08:05.710346 4769 scope.go:117] "RemoveContainer" containerID="3763e21c60b42d0d544a22b54f711243a20cd6d855d0b802a15c29ee20e6ebb4" Jan 31 17:08:05 crc kubenswrapper[4769]: I0131 17:08:05.710413 4769 scope.go:117] "RemoveContainer" containerID="38c57a6952923d6f8bb33f11c84f339256f80eb4733efa1a6c4093e5a7b785ad" Jan 31 17:08:05 crc kubenswrapper[4769]: I0131 17:08:05.713376 4769 scope.go:117] "RemoveContainer" containerID="452bfc017f921450e0c56f4fbd086bb3301f3f433ac77013094801540d60ea1d" Jan 31 17:08:05 crc kubenswrapper[4769]: I0131 17:08:05.713648 4769 scope.go:117] "RemoveContainer" containerID="e74e1b3a9c10ecf6d85141b9315328a9a56f4e496ce76e5f2965ca10dcd670f9" Jan 31 17:08:05 crc kubenswrapper[4769]: I0131 17:08:05.714009 4769 scope.go:117] "RemoveContainer" containerID="4f9316b1a241851c0ac080b456ef9b8a2dfacab077b57921864740a4bfb67473" Jan 31 17:08:05 crc kubenswrapper[4769]: E0131 17:08:05.714827 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 40s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:08:05 crc kubenswrapper[4769]: E0131 17:08:05.929898 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder 
pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:08:06 crc kubenswrapper[4769]: I0131 17:08:06.469471 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerStarted","Data":"45c6b15f99b5833b0f2f7a5f1a332ca5114653356f6826aed2cbfdb4dc809c4d"} Jan 31 17:08:06 crc kubenswrapper[4769]: I0131 17:08:06.470730 4769 scope.go:117] "RemoveContainer" containerID="7737b4ea15d66ecd3a1f0162917edc283af061057e4e073e1b0c4d285ababc8a" Jan 31 17:08:06 crc kubenswrapper[4769]: I0131 17:08:06.470826 4769 scope.go:117] "RemoveContainer" containerID="62946bc30fa0ff5c4e547df146dd6e5243ff6ef16c16250218e2568d4be097ae" Jan 31 17:08:06 crc kubenswrapper[4769]: I0131 17:08:06.470970 4769 scope.go:117] "RemoveContainer" containerID="3763e21c60b42d0d544a22b54f711243a20cd6d855d0b802a15c29ee20e6ebb4" Jan 31 17:08:06 crc kubenswrapper[4769]: I0131 17:08:06.471025 4769 scope.go:117] "RemoveContainer" containerID="38c57a6952923d6f8bb33f11c84f339256f80eb4733efa1a6c4093e5a7b785ad" Jan 31 17:08:06 crc kubenswrapper[4769]: E0131 17:08:06.471458 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:08:13 crc kubenswrapper[4769]: I0131 17:08:13.709017 4769 scope.go:117] "RemoveContainer" containerID="0fa55dcb76bc686943e5b7135025fa4b96b4f5ad436cce5f2cb11ea5e727ea37" Jan 31 17:08:13 crc kubenswrapper[4769]: I0131 17:08:13.709582 4769 scope.go:117] "RemoveContainer" containerID="50b061844f8b9430593d59652fe6b1c117d7129dd574eb1fbaefa1b4c69e0e76" Jan 31 17:08:13 crc kubenswrapper[4769]: I0131 17:08:13.709625 4769 scope.go:117] "RemoveContainer" containerID="fb744d21157f411015c8cd1651de19adb19d19b1ce3580e5574de4b8b82236f1" Jan 31 17:08:13 crc kubenswrapper[4769]: E0131 17:08:13.710023 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:08:13 crc kubenswrapper[4769]: E0131 17:08:13.710111 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:08:14 crc kubenswrapper[4769]: I0131 17:08:14.711299 4769 scope.go:117] "RemoveContainer" containerID="e634ac6f1483117e77d804bf25e388b4736b898b279fe70d09548e308873ff08" Jan 31 17:08:14 crc kubenswrapper[4769]: I0131 17:08:14.711771 4769 scope.go:117] "RemoveContainer" containerID="26628fcb213930c72a45e4d57f450e99405e280e2b7aa0218676e31283f92ff4" Jan 31 17:08:14 crc kubenswrapper[4769]: I0131 17:08:14.711929 4769 scope.go:117] "RemoveContainer" containerID="9b3e25d95706cf3a7214027924c5f4cc2b3702400aed3f71f6bbdfefb5690065" Jan 31 17:08:14 crc kubenswrapper[4769]: E0131 17:08:14.712362 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 40s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:08:15 crc kubenswrapper[4769]: I0131 17:08:15.573526 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="45c6b15f99b5833b0f2f7a5f1a332ca5114653356f6826aed2cbfdb4dc809c4d" exitCode=1 Jan 31 17:08:15 crc kubenswrapper[4769]: I0131 17:08:15.573571 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"45c6b15f99b5833b0f2f7a5f1a332ca5114653356f6826aed2cbfdb4dc809c4d"} Jan 31 17:08:15 crc kubenswrapper[4769]: I0131 17:08:15.573613 4769 scope.go:117] "RemoveContainer" containerID="2b8f74cf7a78134f2f82659b1526844f5e7f23919cdce0a11d91c8d87551dbc7" Jan 31 17:08:15 crc kubenswrapper[4769]: I0131 17:08:15.574592 4769 scope.go:117] "RemoveContainer" containerID="7737b4ea15d66ecd3a1f0162917edc283af061057e4e073e1b0c4d285ababc8a" Jan 31 17:08:15 crc kubenswrapper[4769]: I0131 17:08:15.574718 4769 scope.go:117] "RemoveContainer" containerID="62946bc30fa0ff5c4e547df146dd6e5243ff6ef16c16250218e2568d4be097ae" Jan 31 17:08:15 crc kubenswrapper[4769]: I0131 17:08:15.574874 4769 scope.go:117] "RemoveContainer" containerID="45c6b15f99b5833b0f2f7a5f1a332ca5114653356f6826aed2cbfdb4dc809c4d" Jan 31 17:08:15 crc kubenswrapper[4769]: I0131 17:08:15.574923 4769 scope.go:117] "RemoveContainer" containerID="3763e21c60b42d0d544a22b54f711243a20cd6d855d0b802a15c29ee20e6ebb4" Jan 31 17:08:15 crc kubenswrapper[4769]: I0131 17:08:15.574991 4769 scope.go:117] "RemoveContainer" containerID="38c57a6952923d6f8bb33f11c84f339256f80eb4733efa1a6c4093e5a7b785ad" Jan 31 17:08:16 crc kubenswrapper[4769]: E0131 17:08:16.209468 4769 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 40s restarting failed container=object-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:08:16 crc kubenswrapper[4769]: I0131 17:08:16.589243 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="fcfb9e8a85fc333393776cbd0cc376b3645f3bd4e7e2144bcb19b8983c9b32a6" exitCode=1 Jan 31 17:08:16 crc kubenswrapper[4769]: I0131 17:08:16.589275 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="faa0b58c5c0e2ceeb6c757f67f5915e2e34792cf9ac39d58f52e22e957346c5a" exitCode=1 Jan 31 17:08:16 crc kubenswrapper[4769]: I0131 17:08:16.589301 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerStarted","Data":"4fbd8754d6c10e3d907cb1535af0484fdd0e05947a6708d89bc9ac8a31367cd6"} Jan 31 17:08:16 crc kubenswrapper[4769]: I0131 17:08:16.589321 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerStarted","Data":"0ff9e7c546d2aff57f41fb6c39504569a264d96e9c91dd4200b1d4df6621be8a"} Jan 31 17:08:16 crc kubenswrapper[4769]: I0131 17:08:16.589332 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"fcfb9e8a85fc333393776cbd0cc376b3645f3bd4e7e2144bcb19b8983c9b32a6"} Jan 31 17:08:16 crc kubenswrapper[4769]: I0131 17:08:16.589350 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"faa0b58c5c0e2ceeb6c757f67f5915e2e34792cf9ac39d58f52e22e957346c5a"} Jan 31 17:08:16 crc kubenswrapper[4769]: I0131 17:08:16.589379 4769 scope.go:117] "RemoveContainer" containerID="62946bc30fa0ff5c4e547df146dd6e5243ff6ef16c16250218e2568d4be097ae" Jan 31 17:08:16 crc kubenswrapper[4769]: I0131 17:08:16.590234 4769 scope.go:117] "RemoveContainer" containerID="faa0b58c5c0e2ceeb6c757f67f5915e2e34792cf9ac39d58f52e22e957346c5a" Jan 31 17:08:16 crc kubenswrapper[4769]: I0131 17:08:16.590324 4769 scope.go:117] "RemoveContainer" containerID="fcfb9e8a85fc333393776cbd0cc376b3645f3bd4e7e2144bcb19b8983c9b32a6" Jan 31 17:08:16 crc kubenswrapper[4769]: I0131 17:08:16.590429 4769 scope.go:117] "RemoveContainer" containerID="45c6b15f99b5833b0f2f7a5f1a332ca5114653356f6826aed2cbfdb4dc809c4d" Jan 31 17:08:16 crc kubenswrapper[4769]: E0131 17:08:16.591258 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 40s restarting failed container=object-updater 
pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:08:16 crc kubenswrapper[4769]: I0131 17:08:16.642855 4769 scope.go:117] "RemoveContainer" containerID="7737b4ea15d66ecd3a1f0162917edc283af061057e4e073e1b0c4d285ababc8a" Jan 31 17:08:17 crc kubenswrapper[4769]: I0131 17:08:17.609376 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="4fbd8754d6c10e3d907cb1535af0484fdd0e05947a6708d89bc9ac8a31367cd6" exitCode=1 Jan 31 17:08:17 crc kubenswrapper[4769]: I0131 17:08:17.609694 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="0ff9e7c546d2aff57f41fb6c39504569a264d96e9c91dd4200b1d4df6621be8a" exitCode=1 Jan 31 17:08:17 crc kubenswrapper[4769]: I0131 17:08:17.609532 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"4fbd8754d6c10e3d907cb1535af0484fdd0e05947a6708d89bc9ac8a31367cd6"} Jan 31 17:08:17 crc kubenswrapper[4769]: I0131 17:08:17.609732 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"0ff9e7c546d2aff57f41fb6c39504569a264d96e9c91dd4200b1d4df6621be8a"} Jan 31 17:08:17 crc kubenswrapper[4769]: I0131 17:08:17.609757 4769 scope.go:117] "RemoveContainer" containerID="38c57a6952923d6f8bb33f11c84f339256f80eb4733efa1a6c4093e5a7b785ad" Jan 31 17:08:17 crc kubenswrapper[4769]: I0131 17:08:17.610304 4769 scope.go:117] "RemoveContainer" containerID="faa0b58c5c0e2ceeb6c757f67f5915e2e34792cf9ac39d58f52e22e957346c5a" Jan 31 17:08:17 crc kubenswrapper[4769]: I0131 17:08:17.610429 4769 scope.go:117] "RemoveContainer" containerID="fcfb9e8a85fc333393776cbd0cc376b3645f3bd4e7e2144bcb19b8983c9b32a6" Jan 31 17:08:17 crc kubenswrapper[4769]: I0131 17:08:17.610584 4769 scope.go:117] "RemoveContainer" containerID="45c6b15f99b5833b0f2f7a5f1a332ca5114653356f6826aed2cbfdb4dc809c4d" Jan 31 17:08:17 crc kubenswrapper[4769]: I0131 17:08:17.610600 4769 scope.go:117] "RemoveContainer" containerID="0ff9e7c546d2aff57f41fb6c39504569a264d96e9c91dd4200b1d4df6621be8a" Jan 31 17:08:17 crc kubenswrapper[4769]: I0131 17:08:17.610676 4769 scope.go:117] "RemoveContainer" containerID="4fbd8754d6c10e3d907cb1535af0484fdd0e05947a6708d89bc9ac8a31367cd6" Jan 31 17:08:17 crc kubenswrapper[4769]: E0131 17:08:17.611867 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 40s restarting failed container=object-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to 
\"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:08:17 crc kubenswrapper[4769]: I0131 17:08:17.655379 4769 scope.go:117] "RemoveContainer" containerID="3763e21c60b42d0d544a22b54f711243a20cd6d855d0b802a15c29ee20e6ebb4" Jan 31 17:08:17 crc kubenswrapper[4769]: I0131 17:08:17.709314 4769 scope.go:117] "RemoveContainer" containerID="452bfc017f921450e0c56f4fbd086bb3301f3f433ac77013094801540d60ea1d" Jan 31 17:08:17 crc kubenswrapper[4769]: I0131 17:08:17.709420 4769 scope.go:117] "RemoveContainer" containerID="e74e1b3a9c10ecf6d85141b9315328a9a56f4e496ce76e5f2965ca10dcd670f9" Jan 31 17:08:17 crc kubenswrapper[4769]: I0131 17:08:17.709586 4769 scope.go:117] "RemoveContainer" containerID="4f9316b1a241851c0ac080b456ef9b8a2dfacab077b57921864740a4bfb67473" Jan 31 17:08:17 crc kubenswrapper[4769]: E0131 17:08:17.710003 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 40s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 40s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:08:18 crc kubenswrapper[4769]: I0131 17:08:18.626120 4769 scope.go:117] "RemoveContainer" containerID="faa0b58c5c0e2ceeb6c757f67f5915e2e34792cf9ac39d58f52e22e957346c5a" Jan 31 17:08:18 crc kubenswrapper[4769]: I0131 17:08:18.626191 4769 scope.go:117] "RemoveContainer" containerID="fcfb9e8a85fc333393776cbd0cc376b3645f3bd4e7e2144bcb19b8983c9b32a6" Jan 31 17:08:18 crc kubenswrapper[4769]: I0131 17:08:18.626272 4769 scope.go:117] "RemoveContainer" containerID="45c6b15f99b5833b0f2f7a5f1a332ca5114653356f6826aed2cbfdb4dc809c4d" Jan 31 17:08:18 crc kubenswrapper[4769]: I0131 17:08:18.626281 4769 scope.go:117] "RemoveContainer" containerID="0ff9e7c546d2aff57f41fb6c39504569a264d96e9c91dd4200b1d4df6621be8a" Jan 31 17:08:18 crc kubenswrapper[4769]: I0131 17:08:18.626318 4769 scope.go:117] "RemoveContainer" containerID="4fbd8754d6c10e3d907cb1535af0484fdd0e05947a6708d89bc9ac8a31367cd6" Jan 31 17:08:18 crc kubenswrapper[4769]: E0131 17:08:18.626658 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 40s restarting failed container=object-updater 
pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:08:25 crc kubenswrapper[4769]: I0131 17:08:25.708701 4769 scope.go:117] "RemoveContainer" containerID="0fa55dcb76bc686943e5b7135025fa4b96b4f5ad436cce5f2cb11ea5e727ea37" Jan 31 17:08:25 crc kubenswrapper[4769]: I0131 17:08:25.709390 4769 scope.go:117] "RemoveContainer" containerID="50b061844f8b9430593d59652fe6b1c117d7129dd574eb1fbaefa1b4c69e0e76" Jan 31 17:08:25 crc kubenswrapper[4769]: E0131 17:08:25.709829 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:08:27 crc kubenswrapper[4769]: I0131 17:08:27.708979 4769 scope.go:117] "RemoveContainer" containerID="e634ac6f1483117e77d804bf25e388b4736b898b279fe70d09548e308873ff08" Jan 31 17:08:27 crc kubenswrapper[4769]: I0131 17:08:27.709430 4769 scope.go:117] "RemoveContainer" containerID="26628fcb213930c72a45e4d57f450e99405e280e2b7aa0218676e31283f92ff4" Jan 31 17:08:27 crc kubenswrapper[4769]: I0131 17:08:27.709687 4769 scope.go:117] "RemoveContainer" containerID="9b3e25d95706cf3a7214027924c5f4cc2b3702400aed3f71f6bbdfefb5690065" Jan 31 17:08:28 crc kubenswrapper[4769]: I0131 17:08:28.707925 4769 scope.go:117] "RemoveContainer" containerID="fb744d21157f411015c8cd1651de19adb19d19b1ce3580e5574de4b8b82236f1" Jan 31 17:08:28 crc kubenswrapper[4769]: E0131 17:08:28.708588 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:08:28 crc kubenswrapper[4769]: I0131 17:08:28.718984 4769 generic.go:334] "Generic (PLEG): container finished" podID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" containerID="26eecb11bdaca971bbef9d4925f3cc3b83d73a14264e993359ef7a9dbb09364f" exitCode=1 Jan 31 17:08:28 crc kubenswrapper[4769]: I0131 17:08:28.719017 4769 generic.go:334] "Generic (PLEG): container finished" podID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" containerID="ed8eccfc97e5b6f67afae514000b2281e9d54b0d0439914d0f3ce2950e5dd526" exitCode=1 Jan 31 17:08:28 crc kubenswrapper[4769]: I0131 17:08:28.719031 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" 
event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerStarted","Data":"433f51c44360a7611334395cf0539f51d98ab50859aca7eac808ebc4929aa026"} Jan 31 17:08:28 crc kubenswrapper[4769]: I0131 17:08:28.719050 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerDied","Data":"26eecb11bdaca971bbef9d4925f3cc3b83d73a14264e993359ef7a9dbb09364f"} Jan 31 17:08:28 crc kubenswrapper[4769]: I0131 17:08:28.719061 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerDied","Data":"ed8eccfc97e5b6f67afae514000b2281e9d54b0d0439914d0f3ce2950e5dd526"} Jan 31 17:08:28 crc kubenswrapper[4769]: I0131 17:08:28.719075 4769 scope.go:117] "RemoveContainer" containerID="26628fcb213930c72a45e4d57f450e99405e280e2b7aa0218676e31283f92ff4" Jan 31 17:08:28 crc kubenswrapper[4769]: I0131 17:08:28.719480 4769 scope.go:117] "RemoveContainer" containerID="ed8eccfc97e5b6f67afae514000b2281e9d54b0d0439914d0f3ce2950e5dd526" Jan 31 17:08:28 crc kubenswrapper[4769]: I0131 17:08:28.719555 4769 scope.go:117] "RemoveContainer" containerID="26eecb11bdaca971bbef9d4925f3cc3b83d73a14264e993359ef7a9dbb09364f" Jan 31 17:08:28 crc kubenswrapper[4769]: E0131 17:08:28.719906 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:08:28 crc kubenswrapper[4769]: I0131 17:08:28.771256 4769 scope.go:117] "RemoveContainer" containerID="e634ac6f1483117e77d804bf25e388b4736b898b279fe70d09548e308873ff08" Jan 31 17:08:29 crc kubenswrapper[4769]: I0131 17:08:29.744612 4769 generic.go:334] "Generic (PLEG): container finished" podID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" containerID="433f51c44360a7611334395cf0539f51d98ab50859aca7eac808ebc4929aa026" exitCode=1 Jan 31 17:08:29 crc kubenswrapper[4769]: I0131 17:08:29.744716 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerDied","Data":"433f51c44360a7611334395cf0539f51d98ab50859aca7eac808ebc4929aa026"} Jan 31 17:08:29 crc kubenswrapper[4769]: I0131 17:08:29.744816 4769 scope.go:117] "RemoveContainer" containerID="9b3e25d95706cf3a7214027924c5f4cc2b3702400aed3f71f6bbdfefb5690065" Jan 31 17:08:29 crc kubenswrapper[4769]: I0131 17:08:29.746800 4769 scope.go:117] "RemoveContainer" containerID="ed8eccfc97e5b6f67afae514000b2281e9d54b0d0439914d0f3ce2950e5dd526" Jan 31 17:08:29 crc kubenswrapper[4769]: I0131 17:08:29.746958 4769 scope.go:117] "RemoveContainer" containerID="26eecb11bdaca971bbef9d4925f3cc3b83d73a14264e993359ef7a9dbb09364f" Jan 31 17:08:29 crc kubenswrapper[4769]: I0131 17:08:29.747389 4769 scope.go:117] "RemoveContainer" containerID="433f51c44360a7611334395cf0539f51d98ab50859aca7eac808ebc4929aa026" Jan 31 17:08:29 crc kubenswrapper[4769]: E0131 17:08:29.748076 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed 
to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:08:32 crc kubenswrapper[4769]: I0131 17:08:32.713856 4769 scope.go:117] "RemoveContainer" containerID="452bfc017f921450e0c56f4fbd086bb3301f3f433ac77013094801540d60ea1d" Jan 31 17:08:32 crc kubenswrapper[4769]: I0131 17:08:32.714242 4769 scope.go:117] "RemoveContainer" containerID="e74e1b3a9c10ecf6d85141b9315328a9a56f4e496ce76e5f2965ca10dcd670f9" Jan 31 17:08:32 crc kubenswrapper[4769]: I0131 17:08:32.714362 4769 scope.go:117] "RemoveContainer" containerID="4f9316b1a241851c0ac080b456ef9b8a2dfacab077b57921864740a4bfb67473" Jan 31 17:08:33 crc kubenswrapper[4769]: I0131 17:08:33.708333 4769 scope.go:117] "RemoveContainer" containerID="faa0b58c5c0e2ceeb6c757f67f5915e2e34792cf9ac39d58f52e22e957346c5a" Jan 31 17:08:33 crc kubenswrapper[4769]: I0131 17:08:33.708662 4769 scope.go:117] "RemoveContainer" containerID="fcfb9e8a85fc333393776cbd0cc376b3645f3bd4e7e2144bcb19b8983c9b32a6" Jan 31 17:08:33 crc kubenswrapper[4769]: I0131 17:08:33.708734 4769 scope.go:117] "RemoveContainer" containerID="45c6b15f99b5833b0f2f7a5f1a332ca5114653356f6826aed2cbfdb4dc809c4d" Jan 31 17:08:33 crc kubenswrapper[4769]: I0131 17:08:33.708742 4769 scope.go:117] "RemoveContainer" containerID="0ff9e7c546d2aff57f41fb6c39504569a264d96e9c91dd4200b1d4df6621be8a" Jan 31 17:08:33 crc kubenswrapper[4769]: I0131 17:08:33.708774 4769 scope.go:117] "RemoveContainer" containerID="4fbd8754d6c10e3d907cb1535af0484fdd0e05947a6708d89bc9ac8a31367cd6" Jan 31 17:08:33 crc kubenswrapper[4769]: E0131 17:08:33.709047 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 40s restarting failed container=object-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:08:33 crc kubenswrapper[4769]: I0131 17:08:33.803718 4769 
generic.go:334] "Generic (PLEG): container finished" podID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" containerID="a0be6afe09e24d586774267ab7c53311c327ac8d2b2fc0d031aba01d811e24b4" exitCode=1 Jan 31 17:08:33 crc kubenswrapper[4769]: I0131 17:08:33.803749 4769 generic.go:334] "Generic (PLEG): container finished" podID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" containerID="75da31476ba7a662f7f1a29fecbd114a347aff81f4e4fcbc822ac8511d2dba8e" exitCode=1 Jan 31 17:08:33 crc kubenswrapper[4769]: I0131 17:08:33.803758 4769 generic.go:334] "Generic (PLEG): container finished" podID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" containerID="15a7430a57b4311bcab8fb2cc9275aaa4c3c1cde4b56cd9688607e10f62ad92c" exitCode=1 Jan 31 17:08:33 crc kubenswrapper[4769]: I0131 17:08:33.803776 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerDied","Data":"a0be6afe09e24d586774267ab7c53311c327ac8d2b2fc0d031aba01d811e24b4"} Jan 31 17:08:33 crc kubenswrapper[4769]: I0131 17:08:33.803801 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerDied","Data":"75da31476ba7a662f7f1a29fecbd114a347aff81f4e4fcbc822ac8511d2dba8e"} Jan 31 17:08:33 crc kubenswrapper[4769]: I0131 17:08:33.803811 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerDied","Data":"15a7430a57b4311bcab8fb2cc9275aaa4c3c1cde4b56cd9688607e10f62ad92c"} Jan 31 17:08:33 crc kubenswrapper[4769]: I0131 17:08:33.803827 4769 scope.go:117] "RemoveContainer" containerID="4f9316b1a241851c0ac080b456ef9b8a2dfacab077b57921864740a4bfb67473" Jan 31 17:08:33 crc kubenswrapper[4769]: I0131 17:08:33.804298 4769 scope.go:117] "RemoveContainer" containerID="15a7430a57b4311bcab8fb2cc9275aaa4c3c1cde4b56cd9688607e10f62ad92c" Jan 31 17:08:33 crc kubenswrapper[4769]: I0131 17:08:33.804359 4769 scope.go:117] "RemoveContainer" containerID="75da31476ba7a662f7f1a29fecbd114a347aff81f4e4fcbc822ac8511d2dba8e" Jan 31 17:08:33 crc kubenswrapper[4769]: I0131 17:08:33.804446 4769 scope.go:117] "RemoveContainer" containerID="a0be6afe09e24d586774267ab7c53311c327ac8d2b2fc0d031aba01d811e24b4" Jan 31 17:08:33 crc kubenswrapper[4769]: E0131 17:08:33.804700 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:08:33 crc kubenswrapper[4769]: I0131 17:08:33.871922 4769 scope.go:117] "RemoveContainer" containerID="e74e1b3a9c10ecf6d85141b9315328a9a56f4e496ce76e5f2965ca10dcd670f9" Jan 31 17:08:33 crc kubenswrapper[4769]: I0131 17:08:33.933280 4769 scope.go:117] "RemoveContainer" 
containerID="452bfc017f921450e0c56f4fbd086bb3301f3f433ac77013094801540d60ea1d" Jan 31 17:08:35 crc kubenswrapper[4769]: I0131 17:08:35.840160 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="87ddb4a86deaa8f5409259ffc6fa63ac90f4ca6f5dc5bedd2323d2e947621311" exitCode=1 Jan 31 17:08:35 crc kubenswrapper[4769]: I0131 17:08:35.840381 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"87ddb4a86deaa8f5409259ffc6fa63ac90f4ca6f5dc5bedd2323d2e947621311"} Jan 31 17:08:35 crc kubenswrapper[4769]: I0131 17:08:35.840543 4769 scope.go:117] "RemoveContainer" containerID="e8cf1205aa6ab18a9ce5183ca3000df44bf01a5ba2f0fab8a4ae50c0f9e84c86" Jan 31 17:08:35 crc kubenswrapper[4769]: I0131 17:08:35.841549 4769 scope.go:117] "RemoveContainer" containerID="faa0b58c5c0e2ceeb6c757f67f5915e2e34792cf9ac39d58f52e22e957346c5a" Jan 31 17:08:35 crc kubenswrapper[4769]: I0131 17:08:35.841649 4769 scope.go:117] "RemoveContainer" containerID="fcfb9e8a85fc333393776cbd0cc376b3645f3bd4e7e2144bcb19b8983c9b32a6" Jan 31 17:08:35 crc kubenswrapper[4769]: I0131 17:08:35.841684 4769 scope.go:117] "RemoveContainer" containerID="87ddb4a86deaa8f5409259ffc6fa63ac90f4ca6f5dc5bedd2323d2e947621311" Jan 31 17:08:35 crc kubenswrapper[4769]: I0131 17:08:35.841800 4769 scope.go:117] "RemoveContainer" containerID="45c6b15f99b5833b0f2f7a5f1a332ca5114653356f6826aed2cbfdb4dc809c4d" Jan 31 17:08:35 crc kubenswrapper[4769]: I0131 17:08:35.841816 4769 scope.go:117] "RemoveContainer" containerID="0ff9e7c546d2aff57f41fb6c39504569a264d96e9c91dd4200b1d4df6621be8a" Jan 31 17:08:35 crc kubenswrapper[4769]: I0131 17:08:35.841869 4769 scope.go:117] "RemoveContainer" containerID="4fbd8754d6c10e3d907cb1535af0484fdd0e05947a6708d89bc9ac8a31367cd6" Jan 31 17:08:35 crc kubenswrapper[4769]: E0131 17:08:35.842375 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 40s restarting failed container=container-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 40s restarting failed container=object-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:08:39 crc kubenswrapper[4769]: I0131 17:08:39.709304 4769 scope.go:117] "RemoveContainer" 
containerID="0fa55dcb76bc686943e5b7135025fa4b96b4f5ad436cce5f2cb11ea5e727ea37" Jan 31 17:08:39 crc kubenswrapper[4769]: I0131 17:08:39.710072 4769 scope.go:117] "RemoveContainer" containerID="50b061844f8b9430593d59652fe6b1c117d7129dd574eb1fbaefa1b4c69e0e76" Jan 31 17:08:39 crc kubenswrapper[4769]: E0131 17:08:39.710445 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:08:40 crc kubenswrapper[4769]: I0131 17:08:40.708716 4769 scope.go:117] "RemoveContainer" containerID="fb744d21157f411015c8cd1651de19adb19d19b1ce3580e5574de4b8b82236f1" Jan 31 17:08:40 crc kubenswrapper[4769]: E0131 17:08:40.709084 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:08:41 crc kubenswrapper[4769]: I0131 17:08:41.709164 4769 scope.go:117] "RemoveContainer" containerID="ed8eccfc97e5b6f67afae514000b2281e9d54b0d0439914d0f3ce2950e5dd526" Jan 31 17:08:41 crc kubenswrapper[4769]: I0131 17:08:41.709695 4769 scope.go:117] "RemoveContainer" containerID="26eecb11bdaca971bbef9d4925f3cc3b83d73a14264e993359ef7a9dbb09364f" Jan 31 17:08:41 crc kubenswrapper[4769]: I0131 17:08:41.709893 4769 scope.go:117] "RemoveContainer" containerID="433f51c44360a7611334395cf0539f51d98ab50859aca7eac808ebc4929aa026" Jan 31 17:08:41 crc kubenswrapper[4769]: E0131 17:08:41.710397 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:08:46 crc kubenswrapper[4769]: I0131 17:08:46.708354 4769 scope.go:117] "RemoveContainer" containerID="15a7430a57b4311bcab8fb2cc9275aaa4c3c1cde4b56cd9688607e10f62ad92c" Jan 31 17:08:46 crc kubenswrapper[4769]: I0131 17:08:46.708781 4769 scope.go:117] "RemoveContainer" containerID="75da31476ba7a662f7f1a29fecbd114a347aff81f4e4fcbc822ac8511d2dba8e" Jan 31 17:08:46 crc kubenswrapper[4769]: I0131 17:08:46.708897 4769 scope.go:117] "RemoveContainer" containerID="a0be6afe09e24d586774267ab7c53311c327ac8d2b2fc0d031aba01d811e24b4" Jan 31 
17:08:46 crc kubenswrapper[4769]: E0131 17:08:46.709274 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:08:47 crc kubenswrapper[4769]: I0131 17:08:47.979565 4769 generic.go:334] "Generic (PLEG): container finished" podID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" containerID="3db3e8f1c9b29ed09a5f982f48dcce6770bd77e6d4476386694741e2eedd6d51" exitCode=1 Jan 31 17:08:47 crc kubenswrapper[4769]: I0131 17:08:47.979623 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerDied","Data":"3db3e8f1c9b29ed09a5f982f48dcce6770bd77e6d4476386694741e2eedd6d51"} Jan 31 17:08:47 crc kubenswrapper[4769]: I0131 17:08:47.981081 4769 scope.go:117] "RemoveContainer" containerID="15a7430a57b4311bcab8fb2cc9275aaa4c3c1cde4b56cd9688607e10f62ad92c" Jan 31 17:08:47 crc kubenswrapper[4769]: I0131 17:08:47.981230 4769 scope.go:117] "RemoveContainer" containerID="75da31476ba7a662f7f1a29fecbd114a347aff81f4e4fcbc822ac8511d2dba8e" Jan 31 17:08:47 crc kubenswrapper[4769]: I0131 17:08:47.981386 4769 scope.go:117] "RemoveContainer" containerID="3db3e8f1c9b29ed09a5f982f48dcce6770bd77e6d4476386694741e2eedd6d51" Jan 31 17:08:47 crc kubenswrapper[4769]: I0131 17:08:47.981424 4769 scope.go:117] "RemoveContainer" containerID="a0be6afe09e24d586774267ab7c53311c327ac8d2b2fc0d031aba01d811e24b4" Jan 31 17:08:48 crc kubenswrapper[4769]: E0131 17:08:48.146043 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:08:48 crc kubenswrapper[4769]: I0131 17:08:48.997661 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerStarted","Data":"d6f9b1264de3eafed59f6d3cd753bb0fb6a800593dd28fa0e8b4d364827e1f30"} Jan 31 17:08:48 crc kubenswrapper[4769]: I0131 17:08:48.999826 4769 scope.go:117] "RemoveContainer" containerID="15a7430a57b4311bcab8fb2cc9275aaa4c3c1cde4b56cd9688607e10f62ad92c" Jan 31 17:08:49 crc kubenswrapper[4769]: 
I0131 17:08:49.000046 4769 scope.go:117] "RemoveContainer" containerID="75da31476ba7a662f7f1a29fecbd114a347aff81f4e4fcbc822ac8511d2dba8e" Jan 31 17:08:49 crc kubenswrapper[4769]: I0131 17:08:49.000313 4769 scope.go:117] "RemoveContainer" containerID="a0be6afe09e24d586774267ab7c53311c327ac8d2b2fc0d031aba01d811e24b4" Jan 31 17:08:49 crc kubenswrapper[4769]: E0131 17:08:49.000950 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:08:50 crc kubenswrapper[4769]: I0131 17:08:50.708348 4769 scope.go:117] "RemoveContainer" containerID="0fa55dcb76bc686943e5b7135025fa4b96b4f5ad436cce5f2cb11ea5e727ea37" Jan 31 17:08:50 crc kubenswrapper[4769]: I0131 17:08:50.709630 4769 scope.go:117] "RemoveContainer" containerID="50b061844f8b9430593d59652fe6b1c117d7129dd574eb1fbaefa1b4c69e0e76" Jan 31 17:08:50 crc kubenswrapper[4769]: I0131 17:08:50.709871 4769 scope.go:117] "RemoveContainer" containerID="faa0b58c5c0e2ceeb6c757f67f5915e2e34792cf9ac39d58f52e22e957346c5a" Jan 31 17:08:50 crc kubenswrapper[4769]: I0131 17:08:50.710148 4769 scope.go:117] "RemoveContainer" containerID="fcfb9e8a85fc333393776cbd0cc376b3645f3bd4e7e2144bcb19b8983c9b32a6" Jan 31 17:08:50 crc kubenswrapper[4769]: I0131 17:08:50.710259 4769 scope.go:117] "RemoveContainer" containerID="87ddb4a86deaa8f5409259ffc6fa63ac90f4ca6f5dc5bedd2323d2e947621311" Jan 31 17:08:50 crc kubenswrapper[4769]: I0131 17:08:50.710403 4769 scope.go:117] "RemoveContainer" containerID="45c6b15f99b5833b0f2f7a5f1a332ca5114653356f6826aed2cbfdb4dc809c4d" Jan 31 17:08:50 crc kubenswrapper[4769]: I0131 17:08:50.710434 4769 scope.go:117] "RemoveContainer" containerID="0ff9e7c546d2aff57f41fb6c39504569a264d96e9c91dd4200b1d4df6621be8a" Jan 31 17:08:50 crc kubenswrapper[4769]: I0131 17:08:50.710560 4769 scope.go:117] "RemoveContainer" containerID="4fbd8754d6c10e3d907cb1535af0484fdd0e05947a6708d89bc9ac8a31367cd6" Jan 31 17:08:50 crc kubenswrapper[4769]: E0131 17:08:50.711109 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:08:50 crc kubenswrapper[4769]: E0131 17:08:50.711320 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 40s restarting failed container=container-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 40s restarting failed container=object-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:08:54 crc kubenswrapper[4769]: I0131 17:08:54.708824 4769 scope.go:117] "RemoveContainer" containerID="ed8eccfc97e5b6f67afae514000b2281e9d54b0d0439914d0f3ce2950e5dd526" Jan 31 17:08:54 crc kubenswrapper[4769]: I0131 17:08:54.709521 4769 scope.go:117] "RemoveContainer" containerID="26eecb11bdaca971bbef9d4925f3cc3b83d73a14264e993359ef7a9dbb09364f" Jan 31 17:08:54 crc kubenswrapper[4769]: I0131 17:08:54.709651 4769 scope.go:117] "RemoveContainer" containerID="433f51c44360a7611334395cf0539f51d98ab50859aca7eac808ebc4929aa026" Jan 31 17:08:54 crc kubenswrapper[4769]: E0131 17:08:54.709984 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:08:55 crc kubenswrapper[4769]: I0131 17:08:55.708936 4769 scope.go:117] "RemoveContainer" containerID="fb744d21157f411015c8cd1651de19adb19d19b1ce3580e5574de4b8b82236f1" Jan 31 17:08:55 crc kubenswrapper[4769]: E0131 17:08:55.709264 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:08:59 crc kubenswrapper[4769]: I0131 17:08:59.708668 4769 scope.go:117] "RemoveContainer" containerID="15a7430a57b4311bcab8fb2cc9275aaa4c3c1cde4b56cd9688607e10f62ad92c" Jan 31 
17:08:59 crc kubenswrapper[4769]: I0131 17:08:59.709064 4769 scope.go:117] "RemoveContainer" containerID="75da31476ba7a662f7f1a29fecbd114a347aff81f4e4fcbc822ac8511d2dba8e" Jan 31 17:08:59 crc kubenswrapper[4769]: I0131 17:08:59.709244 4769 scope.go:117] "RemoveContainer" containerID="a0be6afe09e24d586774267ab7c53311c327ac8d2b2fc0d031aba01d811e24b4" Jan 31 17:08:59 crc kubenswrapper[4769]: E0131 17:08:59.709809 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:09:02 crc kubenswrapper[4769]: I0131 17:09:02.718645 4769 scope.go:117] "RemoveContainer" containerID="faa0b58c5c0e2ceeb6c757f67f5915e2e34792cf9ac39d58f52e22e957346c5a" Jan 31 17:09:02 crc kubenswrapper[4769]: I0131 17:09:02.719703 4769 scope.go:117] "RemoveContainer" containerID="fcfb9e8a85fc333393776cbd0cc376b3645f3bd4e7e2144bcb19b8983c9b32a6" Jan 31 17:09:02 crc kubenswrapper[4769]: I0131 17:09:02.719747 4769 scope.go:117] "RemoveContainer" containerID="87ddb4a86deaa8f5409259ffc6fa63ac90f4ca6f5dc5bedd2323d2e947621311" Jan 31 17:09:02 crc kubenswrapper[4769]: I0131 17:09:02.719813 4769 scope.go:117] "RemoveContainer" containerID="45c6b15f99b5833b0f2f7a5f1a332ca5114653356f6826aed2cbfdb4dc809c4d" Jan 31 17:09:02 crc kubenswrapper[4769]: I0131 17:09:02.719823 4769 scope.go:117] "RemoveContainer" containerID="0ff9e7c546d2aff57f41fb6c39504569a264d96e9c91dd4200b1d4df6621be8a" Jan 31 17:09:02 crc kubenswrapper[4769]: I0131 17:09:02.719869 4769 scope.go:117] "RemoveContainer" containerID="4fbd8754d6c10e3d907cb1535af0484fdd0e05947a6708d89bc9ac8a31367cd6" Jan 31 17:09:02 crc kubenswrapper[4769]: E0131 17:09:02.902083 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 40s restarting failed container=container-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" 
pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:09:03 crc kubenswrapper[4769]: I0131 17:09:03.125388 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerStarted","Data":"993b7cfb583c2f3c03283a54e15111bfb34960d250a886a819b21427a34125aa"} Jan 31 17:09:03 crc kubenswrapper[4769]: I0131 17:09:03.126067 4769 scope.go:117] "RemoveContainer" containerID="faa0b58c5c0e2ceeb6c757f67f5915e2e34792cf9ac39d58f52e22e957346c5a" Jan 31 17:09:03 crc kubenswrapper[4769]: I0131 17:09:03.126126 4769 scope.go:117] "RemoveContainer" containerID="fcfb9e8a85fc333393776cbd0cc376b3645f3bd4e7e2144bcb19b8983c9b32a6" Jan 31 17:09:03 crc kubenswrapper[4769]: I0131 17:09:03.126148 4769 scope.go:117] "RemoveContainer" containerID="87ddb4a86deaa8f5409259ffc6fa63ac90f4ca6f5dc5bedd2323d2e947621311" Jan 31 17:09:03 crc kubenswrapper[4769]: I0131 17:09:03.126209 4769 scope.go:117] "RemoveContainer" containerID="0ff9e7c546d2aff57f41fb6c39504569a264d96e9c91dd4200b1d4df6621be8a" Jan 31 17:09:03 crc kubenswrapper[4769]: I0131 17:09:03.126239 4769 scope.go:117] "RemoveContainer" containerID="4fbd8754d6c10e3d907cb1535af0484fdd0e05947a6708d89bc9ac8a31367cd6" Jan 31 17:09:03 crc kubenswrapper[4769]: E0131 17:09:03.126555 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 40s restarting failed container=container-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:09:05 crc kubenswrapper[4769]: I0131 17:09:05.708849 4769 scope.go:117] "RemoveContainer" containerID="0fa55dcb76bc686943e5b7135025fa4b96b4f5ad436cce5f2cb11ea5e727ea37" Jan 31 17:09:05 crc kubenswrapper[4769]: I0131 17:09:05.709186 4769 scope.go:117] "RemoveContainer" containerID="50b061844f8b9430593d59652fe6b1c117d7129dd574eb1fbaefa1b4c69e0e76" Jan 31 17:09:05 crc kubenswrapper[4769]: I0131 17:09:05.709424 4769 scope.go:117] "RemoveContainer" containerID="ed8eccfc97e5b6f67afae514000b2281e9d54b0d0439914d0f3ce2950e5dd526" Jan 31 17:09:05 crc kubenswrapper[4769]: I0131 17:09:05.709603 4769 scope.go:117] "RemoveContainer" containerID="26eecb11bdaca971bbef9d4925f3cc3b83d73a14264e993359ef7a9dbb09364f" Jan 31 17:09:05 crc kubenswrapper[4769]: E0131 17:09:05.709642 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:09:05 crc kubenswrapper[4769]: I0131 17:09:05.709780 4769 scope.go:117] "RemoveContainer" containerID="433f51c44360a7611334395cf0539f51d98ab50859aca7eac808ebc4929aa026" Jan 31 17:09:05 crc kubenswrapper[4769]: E0131 17:09:05.710266 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:09:06 crc kubenswrapper[4769]: I0131 17:09:06.708848 4769 scope.go:117] "RemoveContainer" containerID="fb744d21157f411015c8cd1651de19adb19d19b1ce3580e5574de4b8b82236f1" Jan 31 17:09:06 crc kubenswrapper[4769]: E0131 17:09:06.709259 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:09:11 crc kubenswrapper[4769]: I0131 17:09:11.708230 4769 scope.go:117] "RemoveContainer" containerID="15a7430a57b4311bcab8fb2cc9275aaa4c3c1cde4b56cd9688607e10f62ad92c" Jan 31 17:09:11 crc kubenswrapper[4769]: I0131 17:09:11.708731 4769 scope.go:117] "RemoveContainer" containerID="75da31476ba7a662f7f1a29fecbd114a347aff81f4e4fcbc822ac8511d2dba8e" Jan 31 17:09:11 crc kubenswrapper[4769]: I0131 17:09:11.708818 4769 scope.go:117] "RemoveContainer" containerID="a0be6afe09e24d586774267ab7c53311c327ac8d2b2fc0d031aba01d811e24b4" Jan 31 17:09:11 crc kubenswrapper[4769]: E0131 17:09:11.709151 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" 
podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:09:16 crc kubenswrapper[4769]: I0131 17:09:16.709330 4769 scope.go:117] "RemoveContainer" containerID="faa0b58c5c0e2ceeb6c757f67f5915e2e34792cf9ac39d58f52e22e957346c5a" Jan 31 17:09:16 crc kubenswrapper[4769]: I0131 17:09:16.710000 4769 scope.go:117] "RemoveContainer" containerID="fcfb9e8a85fc333393776cbd0cc376b3645f3bd4e7e2144bcb19b8983c9b32a6" Jan 31 17:09:16 crc kubenswrapper[4769]: I0131 17:09:16.710046 4769 scope.go:117] "RemoveContainer" containerID="87ddb4a86deaa8f5409259ffc6fa63ac90f4ca6f5dc5bedd2323d2e947621311" Jan 31 17:09:16 crc kubenswrapper[4769]: I0131 17:09:16.710167 4769 scope.go:117] "RemoveContainer" containerID="0ff9e7c546d2aff57f41fb6c39504569a264d96e9c91dd4200b1d4df6621be8a" Jan 31 17:09:16 crc kubenswrapper[4769]: I0131 17:09:16.710234 4769 scope.go:117] "RemoveContainer" containerID="4fbd8754d6c10e3d907cb1535af0484fdd0e05947a6708d89bc9ac8a31367cd6" Jan 31 17:09:16 crc kubenswrapper[4769]: E0131 17:09:16.868019 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:09:17 crc kubenswrapper[4769]: I0131 17:09:17.282170 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerStarted","Data":"e02510ae3cb1cffb2ecbbc81af17dca00f092b778dff22277e474bd53c8f2cc7"} Jan 31 17:09:17 crc kubenswrapper[4769]: I0131 17:09:17.283151 4769 scope.go:117] "RemoveContainer" containerID="faa0b58c5c0e2ceeb6c757f67f5915e2e34792cf9ac39d58f52e22e957346c5a" Jan 31 17:09:17 crc kubenswrapper[4769]: I0131 17:09:17.283224 4769 scope.go:117] "RemoveContainer" containerID="fcfb9e8a85fc333393776cbd0cc376b3645f3bd4e7e2144bcb19b8983c9b32a6" Jan 31 17:09:17 crc kubenswrapper[4769]: I0131 17:09:17.283332 4769 scope.go:117] "RemoveContainer" containerID="0ff9e7c546d2aff57f41fb6c39504569a264d96e9c91dd4200b1d4df6621be8a" Jan 31 17:09:17 crc kubenswrapper[4769]: I0131 17:09:17.283381 4769 scope.go:117] "RemoveContainer" containerID="4fbd8754d6c10e3d907cb1535af0484fdd0e05947a6708d89bc9ac8a31367cd6" Jan 31 17:09:17 crc kubenswrapper[4769]: E0131 17:09:17.283701 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator 
pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:09:17 crc kubenswrapper[4769]: I0131 17:09:17.709354 4769 scope.go:117] "RemoveContainer" containerID="fb744d21157f411015c8cd1651de19adb19d19b1ce3580e5574de4b8b82236f1" Jan 31 17:09:17 crc kubenswrapper[4769]: I0131 17:09:17.709392 4769 scope.go:117] "RemoveContainer" containerID="ed8eccfc97e5b6f67afae514000b2281e9d54b0d0439914d0f3ce2950e5dd526" Jan 31 17:09:17 crc kubenswrapper[4769]: I0131 17:09:17.709473 4769 scope.go:117] "RemoveContainer" containerID="26eecb11bdaca971bbef9d4925f3cc3b83d73a14264e993359ef7a9dbb09364f" Jan 31 17:09:17 crc kubenswrapper[4769]: I0131 17:09:17.709644 4769 scope.go:117] "RemoveContainer" containerID="433f51c44360a7611334395cf0539f51d98ab50859aca7eac808ebc4929aa026" Jan 31 17:09:17 crc kubenswrapper[4769]: E0131 17:09:17.709787 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:09:17 crc kubenswrapper[4769]: E0131 17:09:17.710074 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:09:20 crc kubenswrapper[4769]: I0131 17:09:20.708751 4769 scope.go:117] "RemoveContainer" containerID="0fa55dcb76bc686943e5b7135025fa4b96b4f5ad436cce5f2cb11ea5e727ea37" Jan 31 17:09:20 crc kubenswrapper[4769]: I0131 17:09:20.709090 4769 scope.go:117] "RemoveContainer" containerID="50b061844f8b9430593d59652fe6b1c117d7129dd574eb1fbaefa1b4c69e0e76" Jan 31 17:09:20 crc kubenswrapper[4769]: E0131 17:09:20.709519 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server 
pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:09:24 crc kubenswrapper[4769]: I0131 17:09:24.491522 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices\") pod \"swift-ring-rebalance-2sjs2\" (UID: \"54c0116b-a027-4f11-8b6b-aa00778f1acb\") " pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 17:09:24 crc kubenswrapper[4769]: E0131 17:09:24.491763 4769 configmap.go:193] Couldn't get configMap swift-kuttl-tests/swift-ring-config-data: configmap "swift-ring-config-data" not found Jan 31 17:09:24 crc kubenswrapper[4769]: E0131 17:09:24.492009 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices podName:54c0116b-a027-4f11-8b6b-aa00778f1acb nodeName:}" failed. No retries permitted until 2026-01-31 17:11:26.491983653 +0000 UTC m=+2534.566152342 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices") pod "swift-ring-rebalance-2sjs2" (UID: "54c0116b-a027-4f11-8b6b-aa00778f1acb") : configmap "swift-ring-config-data" not found Jan 31 17:09:26 crc kubenswrapper[4769]: I0131 17:09:26.708927 4769 scope.go:117] "RemoveContainer" containerID="15a7430a57b4311bcab8fb2cc9275aaa4c3c1cde4b56cd9688607e10f62ad92c" Jan 31 17:09:26 crc kubenswrapper[4769]: I0131 17:09:26.708998 4769 scope.go:117] "RemoveContainer" containerID="75da31476ba7a662f7f1a29fecbd114a347aff81f4e4fcbc822ac8511d2dba8e" Jan 31 17:09:26 crc kubenswrapper[4769]: I0131 17:09:26.709081 4769 scope.go:117] "RemoveContainer" containerID="a0be6afe09e24d586774267ab7c53311c327ac8d2b2fc0d031aba01d811e24b4" Jan 31 17:09:26 crc kubenswrapper[4769]: E0131 17:09:26.709378 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:09:28 crc kubenswrapper[4769]: I0131 17:09:28.709042 4769 scope.go:117] "RemoveContainer" containerID="faa0b58c5c0e2ceeb6c757f67f5915e2e34792cf9ac39d58f52e22e957346c5a" Jan 31 17:09:28 crc kubenswrapper[4769]: I0131 17:09:28.709618 4769 scope.go:117] "RemoveContainer" containerID="fcfb9e8a85fc333393776cbd0cc376b3645f3bd4e7e2144bcb19b8983c9b32a6" Jan 31 17:09:28 crc kubenswrapper[4769]: I0131 17:09:28.709821 4769 scope.go:117] "RemoveContainer" containerID="0ff9e7c546d2aff57f41fb6c39504569a264d96e9c91dd4200b1d4df6621be8a" Jan 31 17:09:28 crc kubenswrapper[4769]: I0131 17:09:28.709890 4769 scope.go:117] "RemoveContainer" 
containerID="4fbd8754d6c10e3d907cb1535af0484fdd0e05947a6708d89bc9ac8a31367cd6" Jan 31 17:09:28 crc kubenswrapper[4769]: E0131 17:09:28.710369 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:09:31 crc kubenswrapper[4769]: I0131 17:09:31.709219 4769 scope.go:117] "RemoveContainer" containerID="ed8eccfc97e5b6f67afae514000b2281e9d54b0d0439914d0f3ce2950e5dd526" Jan 31 17:09:31 crc kubenswrapper[4769]: I0131 17:09:31.709352 4769 scope.go:117] "RemoveContainer" containerID="26eecb11bdaca971bbef9d4925f3cc3b83d73a14264e993359ef7a9dbb09364f" Jan 31 17:09:31 crc kubenswrapper[4769]: I0131 17:09:31.709634 4769 scope.go:117] "RemoveContainer" containerID="fb744d21157f411015c8cd1651de19adb19d19b1ce3580e5574de4b8b82236f1" Jan 31 17:09:31 crc kubenswrapper[4769]: I0131 17:09:31.709847 4769 scope.go:117] "RemoveContainer" containerID="433f51c44360a7611334395cf0539f51d98ab50859aca7eac808ebc4929aa026" Jan 31 17:09:31 crc kubenswrapper[4769]: E0131 17:09:31.710076 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:09:31 crc kubenswrapper[4769]: E0131 17:09:31.710334 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:09:32 crc kubenswrapper[4769]: E0131 17:09:32.000530 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ring-data-devices], unattached volumes=[], failed to process volumes=[]: context deadline exceeded" pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" 
podUID="54c0116b-a027-4f11-8b6b-aa00778f1acb" Jan 31 17:09:32 crc kubenswrapper[4769]: I0131 17:09:32.417986 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 17:09:34 crc kubenswrapper[4769]: I0131 17:09:34.708372 4769 scope.go:117] "RemoveContainer" containerID="0fa55dcb76bc686943e5b7135025fa4b96b4f5ad436cce5f2cb11ea5e727ea37" Jan 31 17:09:34 crc kubenswrapper[4769]: I0131 17:09:34.708749 4769 scope.go:117] "RemoveContainer" containerID="50b061844f8b9430593d59652fe6b1c117d7129dd574eb1fbaefa1b4c69e0e76" Jan 31 17:09:34 crc kubenswrapper[4769]: E0131 17:09:34.709098 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:09:37 crc kubenswrapper[4769]: I0131 17:09:37.709272 4769 scope.go:117] "RemoveContainer" containerID="15a7430a57b4311bcab8fb2cc9275aaa4c3c1cde4b56cd9688607e10f62ad92c" Jan 31 17:09:37 crc kubenswrapper[4769]: I0131 17:09:37.709795 4769 scope.go:117] "RemoveContainer" containerID="75da31476ba7a662f7f1a29fecbd114a347aff81f4e4fcbc822ac8511d2dba8e" Jan 31 17:09:37 crc kubenswrapper[4769]: I0131 17:09:37.709980 4769 scope.go:117] "RemoveContainer" containerID="a0be6afe09e24d586774267ab7c53311c327ac8d2b2fc0d031aba01d811e24b4" Jan 31 17:09:37 crc kubenswrapper[4769]: E0131 17:09:37.710459 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:09:39 crc kubenswrapper[4769]: I0131 17:09:39.709022 4769 scope.go:117] "RemoveContainer" containerID="faa0b58c5c0e2ceeb6c757f67f5915e2e34792cf9ac39d58f52e22e957346c5a" Jan 31 17:09:39 crc kubenswrapper[4769]: I0131 17:09:39.709219 4769 scope.go:117] "RemoveContainer" containerID="fcfb9e8a85fc333393776cbd0cc376b3645f3bd4e7e2144bcb19b8983c9b32a6" Jan 31 17:09:39 crc kubenswrapper[4769]: I0131 17:09:39.709403 4769 scope.go:117] "RemoveContainer" containerID="0ff9e7c546d2aff57f41fb6c39504569a264d96e9c91dd4200b1d4df6621be8a" Jan 31 17:09:39 crc kubenswrapper[4769]: I0131 17:09:39.709470 4769 scope.go:117] "RemoveContainer" containerID="4fbd8754d6c10e3d907cb1535af0484fdd0e05947a6708d89bc9ac8a31367cd6" Jan 31 17:09:39 crc kubenswrapper[4769]: E0131 17:09:39.710277 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for 
\"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:09:44 crc kubenswrapper[4769]: I0131 17:09:44.709002 4769 scope.go:117] "RemoveContainer" containerID="ed8eccfc97e5b6f67afae514000b2281e9d54b0d0439914d0f3ce2950e5dd526" Jan 31 17:09:44 crc kubenswrapper[4769]: I0131 17:09:44.709229 4769 scope.go:117] "RemoveContainer" containerID="26eecb11bdaca971bbef9d4925f3cc3b83d73a14264e993359ef7a9dbb09364f" Jan 31 17:09:44 crc kubenswrapper[4769]: I0131 17:09:44.709407 4769 scope.go:117] "RemoveContainer" containerID="433f51c44360a7611334395cf0539f51d98ab50859aca7eac808ebc4929aa026" Jan 31 17:09:44 crc kubenswrapper[4769]: E0131 17:09:44.709873 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:09:45 crc kubenswrapper[4769]: I0131 17:09:45.708092 4769 scope.go:117] "RemoveContainer" containerID="fb744d21157f411015c8cd1651de19adb19d19b1ce3580e5574de4b8b82236f1" Jan 31 17:09:45 crc kubenswrapper[4769]: E0131 17:09:45.708381 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:09:46 crc kubenswrapper[4769]: I0131 17:09:46.708162 4769 scope.go:117] "RemoveContainer" containerID="0fa55dcb76bc686943e5b7135025fa4b96b4f5ad436cce5f2cb11ea5e727ea37" Jan 31 17:09:46 crc kubenswrapper[4769]: I0131 17:09:46.709670 4769 scope.go:117] "RemoveContainer" containerID="50b061844f8b9430593d59652fe6b1c117d7129dd574eb1fbaefa1b4c69e0e76" Jan 31 17:09:46 crc kubenswrapper[4769]: E0131 17:09:46.710211 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:09:50 crc kubenswrapper[4769]: I0131 17:09:50.709409 4769 scope.go:117] "RemoveContainer" containerID="faa0b58c5c0e2ceeb6c757f67f5915e2e34792cf9ac39d58f52e22e957346c5a" Jan 31 17:09:50 crc kubenswrapper[4769]: I0131 17:09:50.709839 4769 scope.go:117] "RemoveContainer" containerID="fcfb9e8a85fc333393776cbd0cc376b3645f3bd4e7e2144bcb19b8983c9b32a6" Jan 31 17:09:50 crc kubenswrapper[4769]: I0131 17:09:50.709986 4769 scope.go:117] "RemoveContainer" containerID="0ff9e7c546d2aff57f41fb6c39504569a264d96e9c91dd4200b1d4df6621be8a" Jan 31 17:09:50 crc kubenswrapper[4769]: I0131 17:09:50.710043 4769 scope.go:117] "RemoveContainer" containerID="4fbd8754d6c10e3d907cb1535af0484fdd0e05947a6708d89bc9ac8a31367cd6" Jan 31 17:09:50 crc kubenswrapper[4769]: E0131 17:09:50.710483 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:09:52 crc kubenswrapper[4769]: I0131 17:09:52.712081 4769 scope.go:117] "RemoveContainer" containerID="15a7430a57b4311bcab8fb2cc9275aaa4c3c1cde4b56cd9688607e10f62ad92c" Jan 31 17:09:52 crc kubenswrapper[4769]: I0131 17:09:52.712150 4769 scope.go:117] "RemoveContainer" containerID="75da31476ba7a662f7f1a29fecbd114a347aff81f4e4fcbc822ac8511d2dba8e" Jan 31 17:09:52 crc kubenswrapper[4769]: I0131 17:09:52.712302 4769 scope.go:117] "RemoveContainer" containerID="a0be6afe09e24d586774267ab7c53311c327ac8d2b2fc0d031aba01d811e24b4" Jan 31 17:09:52 crc kubenswrapper[4769]: E0131 17:09:52.712537 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-expirer 
pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:09:56 crc kubenswrapper[4769]: I0131 17:09:56.709762 4769 scope.go:117] "RemoveContainer" containerID="ed8eccfc97e5b6f67afae514000b2281e9d54b0d0439914d0f3ce2950e5dd526" Jan 31 17:09:56 crc kubenswrapper[4769]: I0131 17:09:56.710752 4769 scope.go:117] "RemoveContainer" containerID="26eecb11bdaca971bbef9d4925f3cc3b83d73a14264e993359ef7a9dbb09364f" Jan 31 17:09:56 crc kubenswrapper[4769]: I0131 17:09:56.710942 4769 scope.go:117] "RemoveContainer" containerID="433f51c44360a7611334395cf0539f51d98ab50859aca7eac808ebc4929aa026" Jan 31 17:09:57 crc kubenswrapper[4769]: I0131 17:09:57.667277 4769 generic.go:334] "Generic (PLEG): container finished" podID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" containerID="67805a171fa50921b299bf9de6a3276969bc5013bd97555c22cdf581a54e8c5e" exitCode=1 Jan 31 17:09:57 crc kubenswrapper[4769]: I0131 17:09:57.667570 4769 generic.go:334] "Generic (PLEG): container finished" podID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" containerID="99495e56c7a32895e31a02ceb548e1efa74689fe78a325d2cac3ed71226951c2" exitCode=1 Jan 31 17:09:57 crc kubenswrapper[4769]: I0131 17:09:57.667353 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerStarted","Data":"72143fdc72a4aeb5d6956e14746c5eefb4688e2a1f8d06687b47ef4b85b8aebd"} Jan 31 17:09:57 crc kubenswrapper[4769]: I0131 17:09:57.667620 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerDied","Data":"67805a171fa50921b299bf9de6a3276969bc5013bd97555c22cdf581a54e8c5e"} Jan 31 17:09:57 crc kubenswrapper[4769]: I0131 17:09:57.667640 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerDied","Data":"99495e56c7a32895e31a02ceb548e1efa74689fe78a325d2cac3ed71226951c2"} Jan 31 17:09:57 crc kubenswrapper[4769]: I0131 17:09:57.667662 4769 scope.go:117] "RemoveContainer" containerID="26eecb11bdaca971bbef9d4925f3cc3b83d73a14264e993359ef7a9dbb09364f" Jan 31 17:09:57 crc kubenswrapper[4769]: I0131 17:09:57.668708 4769 scope.go:117] "RemoveContainer" containerID="99495e56c7a32895e31a02ceb548e1efa74689fe78a325d2cac3ed71226951c2" Jan 31 17:09:57 crc kubenswrapper[4769]: I0131 17:09:57.668855 4769 scope.go:117] "RemoveContainer" containerID="67805a171fa50921b299bf9de6a3276969bc5013bd97555c22cdf581a54e8c5e" Jan 31 17:09:57 crc kubenswrapper[4769]: E0131 17:09:57.692999 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:09:57 crc kubenswrapper[4769]: I0131 17:09:57.741713 4769 scope.go:117] "RemoveContainer" containerID="ed8eccfc97e5b6f67afae514000b2281e9d54b0d0439914d0f3ce2950e5dd526" Jan 31 17:09:58 crc 
kubenswrapper[4769]: I0131 17:09:58.683729 4769 generic.go:334] "Generic (PLEG): container finished" podID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" containerID="72143fdc72a4aeb5d6956e14746c5eefb4688e2a1f8d06687b47ef4b85b8aebd" exitCode=1 Jan 31 17:09:58 crc kubenswrapper[4769]: I0131 17:09:58.683763 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerDied","Data":"72143fdc72a4aeb5d6956e14746c5eefb4688e2a1f8d06687b47ef4b85b8aebd"} Jan 31 17:09:58 crc kubenswrapper[4769]: I0131 17:09:58.683827 4769 scope.go:117] "RemoveContainer" containerID="433f51c44360a7611334395cf0539f51d98ab50859aca7eac808ebc4929aa026" Jan 31 17:09:58 crc kubenswrapper[4769]: I0131 17:09:58.684546 4769 scope.go:117] "RemoveContainer" containerID="99495e56c7a32895e31a02ceb548e1efa74689fe78a325d2cac3ed71226951c2" Jan 31 17:09:58 crc kubenswrapper[4769]: I0131 17:09:58.684617 4769 scope.go:117] "RemoveContainer" containerID="67805a171fa50921b299bf9de6a3276969bc5013bd97555c22cdf581a54e8c5e" Jan 31 17:09:58 crc kubenswrapper[4769]: I0131 17:09:58.684718 4769 scope.go:117] "RemoveContainer" containerID="72143fdc72a4aeb5d6956e14746c5eefb4688e2a1f8d06687b47ef4b85b8aebd" Jan 31 17:09:58 crc kubenswrapper[4769]: E0131 17:09:58.685155 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:09:58 crc kubenswrapper[4769]: I0131 17:09:58.708477 4769 scope.go:117] "RemoveContainer" containerID="0fa55dcb76bc686943e5b7135025fa4b96b4f5ad436cce5f2cb11ea5e727ea37" Jan 31 17:09:58 crc kubenswrapper[4769]: I0131 17:09:58.708856 4769 scope.go:117] "RemoveContainer" containerID="50b061844f8b9430593d59652fe6b1c117d7129dd574eb1fbaefa1b4c69e0e76" Jan 31 17:09:58 crc kubenswrapper[4769]: E0131 17:09:58.931918 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:09:59 crc kubenswrapper[4769]: I0131 17:09:59.695605 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerStarted","Data":"adba7e61ada8b9f946448ad1098624740a7093e4c1be03881d06025667faad12"} Jan 31 17:09:59 crc kubenswrapper[4769]: I0131 17:09:59.695862 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 17:09:59 crc kubenswrapper[4769]: I0131 17:09:59.698408 4769 scope.go:117] "RemoveContainer" 
containerID="50b061844f8b9430593d59652fe6b1c117d7129dd574eb1fbaefa1b4c69e0e76" Jan 31 17:09:59 crc kubenswrapper[4769]: E0131 17:09:59.699321 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:10:00 crc kubenswrapper[4769]: I0131 17:10:00.708701 4769 scope.go:117] "RemoveContainer" containerID="fb744d21157f411015c8cd1651de19adb19d19b1ce3580e5574de4b8b82236f1" Jan 31 17:10:00 crc kubenswrapper[4769]: E0131 17:10:00.709401 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:10:00 crc kubenswrapper[4769]: I0131 17:10:00.724693 4769 scope.go:117] "RemoveContainer" containerID="50b061844f8b9430593d59652fe6b1c117d7129dd574eb1fbaefa1b4c69e0e76" Jan 31 17:10:00 crc kubenswrapper[4769]: E0131 17:10:00.725028 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:10:03 crc kubenswrapper[4769]: I0131 17:10:03.709957 4769 scope.go:117] "RemoveContainer" containerID="faa0b58c5c0e2ceeb6c757f67f5915e2e34792cf9ac39d58f52e22e957346c5a" Jan 31 17:10:03 crc kubenswrapper[4769]: I0131 17:10:03.710613 4769 scope.go:117] "RemoveContainer" containerID="fcfb9e8a85fc333393776cbd0cc376b3645f3bd4e7e2144bcb19b8983c9b32a6" Jan 31 17:10:03 crc kubenswrapper[4769]: I0131 17:10:03.710882 4769 scope.go:117] "RemoveContainer" containerID="0ff9e7c546d2aff57f41fb6c39504569a264d96e9c91dd4200b1d4df6621be8a" Jan 31 17:10:03 crc kubenswrapper[4769]: I0131 17:10:03.710981 4769 scope.go:117] "RemoveContainer" containerID="4fbd8754d6c10e3d907cb1535af0484fdd0e05947a6708d89bc9ac8a31367cd6" Jan 31 17:10:03 crc kubenswrapper[4769]: E0131 17:10:03.711982 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" 
pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:10:05 crc kubenswrapper[4769]: I0131 17:10:05.648245 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 17:10:05 crc kubenswrapper[4769]: I0131 17:10:05.708674 4769 scope.go:117] "RemoveContainer" containerID="15a7430a57b4311bcab8fb2cc9275aaa4c3c1cde4b56cd9688607e10f62ad92c" Jan 31 17:10:05 crc kubenswrapper[4769]: I0131 17:10:05.708743 4769 scope.go:117] "RemoveContainer" containerID="75da31476ba7a662f7f1a29fecbd114a347aff81f4e4fcbc822ac8511d2dba8e" Jan 31 17:10:05 crc kubenswrapper[4769]: I0131 17:10:05.708876 4769 scope.go:117] "RemoveContainer" containerID="a0be6afe09e24d586774267ab7c53311c327ac8d2b2fc0d031aba01d811e24b4" Jan 31 17:10:06 crc kubenswrapper[4769]: I0131 17:10:06.647153 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 17:10:06 crc kubenswrapper[4769]: I0131 17:10:06.788609 4769 generic.go:334] "Generic (PLEG): container finished" podID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" containerID="acdc399cdad3416fe47066ca4cbd9d2ba6fff02a1a99c57dc0e67e49c41492fb" exitCode=1 Jan 31 17:10:06 crc kubenswrapper[4769]: I0131 17:10:06.788647 4769 generic.go:334] "Generic (PLEG): container finished" podID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" containerID="90ce8e5db0d511a2adf4d1875136980b24925ce310acb1050c35973fab2d8c03" exitCode=1 Jan 31 17:10:06 crc kubenswrapper[4769]: I0131 17:10:06.788659 4769 generic.go:334] "Generic (PLEG): container finished" podID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" containerID="5df4aa2d643db8d794e13ca8e71957995493cf68779403c51bedbd3db5bfcd19" exitCode=1 Jan 31 17:10:06 crc kubenswrapper[4769]: I0131 17:10:06.788720 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerDied","Data":"acdc399cdad3416fe47066ca4cbd9d2ba6fff02a1a99c57dc0e67e49c41492fb"} Jan 31 17:10:06 crc kubenswrapper[4769]: I0131 17:10:06.788752 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerDied","Data":"90ce8e5db0d511a2adf4d1875136980b24925ce310acb1050c35973fab2d8c03"} Jan 31 17:10:06 crc kubenswrapper[4769]: I0131 17:10:06.788766 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerDied","Data":"5df4aa2d643db8d794e13ca8e71957995493cf68779403c51bedbd3db5bfcd19"} Jan 31 17:10:06 crc kubenswrapper[4769]: I0131 17:10:06.788787 4769 scope.go:117] "RemoveContainer" containerID="a0be6afe09e24d586774267ab7c53311c327ac8d2b2fc0d031aba01d811e24b4" Jan 31 17:10:06 crc kubenswrapper[4769]: I0131 17:10:06.789468 4769 scope.go:117] "RemoveContainer" containerID="5df4aa2d643db8d794e13ca8e71957995493cf68779403c51bedbd3db5bfcd19" Jan 31 17:10:06 crc kubenswrapper[4769]: I0131 17:10:06.789592 4769 scope.go:117] "RemoveContainer" containerID="90ce8e5db0d511a2adf4d1875136980b24925ce310acb1050c35973fab2d8c03" Jan 31 17:10:06 crc kubenswrapper[4769]: I0131 17:10:06.789725 4769 
scope.go:117] "RemoveContainer" containerID="acdc399cdad3416fe47066ca4cbd9d2ba6fff02a1a99c57dc0e67e49c41492fb" Jan 31 17:10:06 crc kubenswrapper[4769]: E0131 17:10:06.790106 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:10:06 crc kubenswrapper[4769]: I0131 17:10:06.799901 4769 generic.go:334] "Generic (PLEG): container finished" podID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" containerID="cf3815edf78281d41b22fdba9f12ad0237e640690e8100d80b38c9175962d790" exitCode=1 Jan 31 17:10:06 crc kubenswrapper[4769]: I0131 17:10:06.800031 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerDied","Data":"cf3815edf78281d41b22fdba9f12ad0237e640690e8100d80b38c9175962d790"} Jan 31 17:10:06 crc kubenswrapper[4769]: I0131 17:10:06.801062 4769 scope.go:117] "RemoveContainer" containerID="99495e56c7a32895e31a02ceb548e1efa74689fe78a325d2cac3ed71226951c2" Jan 31 17:10:06 crc kubenswrapper[4769]: I0131 17:10:06.801223 4769 scope.go:117] "RemoveContainer" containerID="67805a171fa50921b299bf9de6a3276969bc5013bd97555c22cdf581a54e8c5e" Jan 31 17:10:06 crc kubenswrapper[4769]: I0131 17:10:06.801270 4769 scope.go:117] "RemoveContainer" containerID="cf3815edf78281d41b22fdba9f12ad0237e640690e8100d80b38c9175962d790" Jan 31 17:10:06 crc kubenswrapper[4769]: I0131 17:10:06.801413 4769 scope.go:117] "RemoveContainer" containerID="72143fdc72a4aeb5d6956e14746c5eefb4688e2a1f8d06687b47ef4b85b8aebd" Jan 31 17:10:06 crc kubenswrapper[4769]: I0131 17:10:06.844246 4769 scope.go:117] "RemoveContainer" containerID="75da31476ba7a662f7f1a29fecbd114a347aff81f4e4fcbc822ac8511d2dba8e" Jan 31 17:10:06 crc kubenswrapper[4769]: I0131 17:10:06.922930 4769 scope.go:117] "RemoveContainer" containerID="15a7430a57b4311bcab8fb2cc9275aaa4c3c1cde4b56cd9688607e10f62ad92c" Jan 31 17:10:07 crc kubenswrapper[4769]: E0131 17:10:07.026093 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:10:07 crc kubenswrapper[4769]: I0131 
17:10:07.832787 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerStarted","Data":"e7458c7a973316d933083196f2ae04bbc7c0b445b16ad8bab77c5a38d93d145c"} Jan 31 17:10:07 crc kubenswrapper[4769]: I0131 17:10:07.833682 4769 scope.go:117] "RemoveContainer" containerID="99495e56c7a32895e31a02ceb548e1efa74689fe78a325d2cac3ed71226951c2" Jan 31 17:10:07 crc kubenswrapper[4769]: I0131 17:10:07.834131 4769 scope.go:117] "RemoveContainer" containerID="67805a171fa50921b299bf9de6a3276969bc5013bd97555c22cdf581a54e8c5e" Jan 31 17:10:07 crc kubenswrapper[4769]: I0131 17:10:07.834238 4769 scope.go:117] "RemoveContainer" containerID="72143fdc72a4aeb5d6956e14746c5eefb4688e2a1f8d06687b47ef4b85b8aebd" Jan 31 17:10:07 crc kubenswrapper[4769]: E0131 17:10:07.834663 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:10:08 crc kubenswrapper[4769]: I0131 17:10:08.647362 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 17:10:11 crc kubenswrapper[4769]: I0131 17:10:11.647254 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 17:10:11 crc kubenswrapper[4769]: I0131 17:10:11.647754 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 17:10:11 crc kubenswrapper[4769]: I0131 17:10:11.647290 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 17:10:11 crc kubenswrapper[4769]: I0131 17:10:11.648898 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="proxy-httpd" containerStatusID={"Type":"cri-o","ID":"adba7e61ada8b9f946448ad1098624740a7093e4c1be03881d06025667faad12"} pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" containerMessage="Container proxy-httpd failed liveness probe, will be restarted" Jan 31 17:10:11 crc kubenswrapper[4769]: I0131 17:10:11.648941 4769 scope.go:117] "RemoveContainer" containerID="50b061844f8b9430593d59652fe6b1c117d7129dd574eb1fbaefa1b4c69e0e76" Jan 31 17:10:11 crc kubenswrapper[4769]: I0131 17:10:11.648993 4769 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" containerID="cri-o://adba7e61ada8b9f946448ad1098624740a7093e4c1be03881d06025667faad12" gracePeriod=30 Jan 31 17:10:11 crc kubenswrapper[4769]: I0131 17:10:11.650172 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 17:10:11 crc kubenswrapper[4769]: I0131 17:10:11.876479 4769 generic.go:334] "Generic (PLEG): container finished" podID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerID="adba7e61ada8b9f946448ad1098624740a7093e4c1be03881d06025667faad12" exitCode=0 Jan 31 17:10:11 crc kubenswrapper[4769]: I0131 17:10:11.876574 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerDied","Data":"adba7e61ada8b9f946448ad1098624740a7093e4c1be03881d06025667faad12"} Jan 31 17:10:11 crc kubenswrapper[4769]: I0131 17:10:11.876660 4769 scope.go:117] "RemoveContainer" containerID="0fa55dcb76bc686943e5b7135025fa4b96b4f5ad436cce5f2cb11ea5e727ea37" Jan 31 17:10:11 crc kubenswrapper[4769]: E0131 17:10:11.977406 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:10:12 crc kubenswrapper[4769]: I0131 17:10:12.886944 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerStarted","Data":"8b255e9b67d1ed575196b0f86f7c7a9b343f002ed40dfb1d715798e0a6d44926"} Jan 31 17:10:12 crc kubenswrapper[4769]: I0131 17:10:12.887100 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 17:10:12 crc kubenswrapper[4769]: I0131 17:10:12.887615 4769 scope.go:117] "RemoveContainer" containerID="50b061844f8b9430593d59652fe6b1c117d7129dd574eb1fbaefa1b4c69e0e76" Jan 31 17:10:12 crc kubenswrapper[4769]: E0131 17:10:12.887839 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:10:13 crc kubenswrapper[4769]: I0131 17:10:13.906747 4769 scope.go:117] "RemoveContainer" containerID="50b061844f8b9430593d59652fe6b1c117d7129dd574eb1fbaefa1b4c69e0e76" Jan 31 17:10:13 crc kubenswrapper[4769]: E0131 17:10:13.908046 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:10:14 crc kubenswrapper[4769]: I0131 
17:10:14.178146 4769 scope.go:117] "RemoveContainer" containerID="ab3e46964d289c6ae545f187ef635813f81c07319c44d51ad6191c861a92d087" Jan 31 17:10:14 crc kubenswrapper[4769]: I0131 17:10:14.210924 4769 scope.go:117] "RemoveContainer" containerID="0ac275e4e43e260eb8fbd43a7936dd3c6a61b12c495bf82651970ffafb51e947" Jan 31 17:10:14 crc kubenswrapper[4769]: I0131 17:10:14.242651 4769 scope.go:117] "RemoveContainer" containerID="d21f161ee49699cf6d88f554d921ffb0a17b36f5e7fb6bc7b82fbb3c661d9fa8" Jan 31 17:10:15 crc kubenswrapper[4769]: I0131 17:10:15.708221 4769 scope.go:117] "RemoveContainer" containerID="fb744d21157f411015c8cd1651de19adb19d19b1ce3580e5574de4b8b82236f1" Jan 31 17:10:15 crc kubenswrapper[4769]: E0131 17:10:15.708539 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:10:16 crc kubenswrapper[4769]: I0131 17:10:16.647548 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 17:10:17 crc kubenswrapper[4769]: I0131 17:10:17.647596 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 17:10:18 crc kubenswrapper[4769]: I0131 17:10:18.708676 4769 scope.go:117] "RemoveContainer" containerID="faa0b58c5c0e2ceeb6c757f67f5915e2e34792cf9ac39d58f52e22e957346c5a" Jan 31 17:10:18 crc kubenswrapper[4769]: I0131 17:10:18.708832 4769 scope.go:117] "RemoveContainer" containerID="fcfb9e8a85fc333393776cbd0cc376b3645f3bd4e7e2144bcb19b8983c9b32a6" Jan 31 17:10:18 crc kubenswrapper[4769]: I0131 17:10:18.709139 4769 scope.go:117] "RemoveContainer" containerID="0ff9e7c546d2aff57f41fb6c39504569a264d96e9c91dd4200b1d4df6621be8a" Jan 31 17:10:18 crc kubenswrapper[4769]: I0131 17:10:18.709231 4769 scope.go:117] "RemoveContainer" containerID="4fbd8754d6c10e3d907cb1535af0484fdd0e05947a6708d89bc9ac8a31367cd6" Jan 31 17:10:18 crc kubenswrapper[4769]: E0131 17:10:18.709715 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" 
podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:10:20 crc kubenswrapper[4769]: I0131 17:10:20.647698 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 17:10:20 crc kubenswrapper[4769]: I0131 17:10:20.709483 4769 scope.go:117] "RemoveContainer" containerID="5df4aa2d643db8d794e13ca8e71957995493cf68779403c51bedbd3db5bfcd19" Jan 31 17:10:20 crc kubenswrapper[4769]: I0131 17:10:20.709595 4769 scope.go:117] "RemoveContainer" containerID="90ce8e5db0d511a2adf4d1875136980b24925ce310acb1050c35973fab2d8c03" Jan 31 17:10:20 crc kubenswrapper[4769]: I0131 17:10:20.709742 4769 scope.go:117] "RemoveContainer" containerID="acdc399cdad3416fe47066ca4cbd9d2ba6fff02a1a99c57dc0e67e49c41492fb" Jan 31 17:10:20 crc kubenswrapper[4769]: I0131 17:10:20.710136 4769 scope.go:117] "RemoveContainer" containerID="99495e56c7a32895e31a02ceb548e1efa74689fe78a325d2cac3ed71226951c2" Jan 31 17:10:20 crc kubenswrapper[4769]: I0131 17:10:20.710411 4769 scope.go:117] "RemoveContainer" containerID="67805a171fa50921b299bf9de6a3276969bc5013bd97555c22cdf581a54e8c5e" Jan 31 17:10:20 crc kubenswrapper[4769]: I0131 17:10:20.710808 4769 scope.go:117] "RemoveContainer" containerID="72143fdc72a4aeb5d6956e14746c5eefb4688e2a1f8d06687b47ef4b85b8aebd" Jan 31 17:10:20 crc kubenswrapper[4769]: E0131 17:10:20.711450 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:10:20 crc kubenswrapper[4769]: E0131 17:10:20.710165 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:10:21 crc kubenswrapper[4769]: I0131 17:10:21.647635 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 17:10:23 crc kubenswrapper[4769]: I0131 
17:10:23.647777 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 17:10:23 crc kubenswrapper[4769]: I0131 17:10:23.647849 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 17:10:23 crc kubenswrapper[4769]: I0131 17:10:23.648333 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="proxy-httpd" containerStatusID={"Type":"cri-o","ID":"8b255e9b67d1ed575196b0f86f7c7a9b343f002ed40dfb1d715798e0a6d44926"} pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" containerMessage="Container proxy-httpd failed liveness probe, will be restarted" Jan 31 17:10:23 crc kubenswrapper[4769]: I0131 17:10:23.648352 4769 scope.go:117] "RemoveContainer" containerID="50b061844f8b9430593d59652fe6b1c117d7129dd574eb1fbaefa1b4c69e0e76" Jan 31 17:10:23 crc kubenswrapper[4769]: I0131 17:10:23.648383 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" containerID="cri-o://8b255e9b67d1ed575196b0f86f7c7a9b343f002ed40dfb1d715798e0a6d44926" gracePeriod=30 Jan 31 17:10:23 crc kubenswrapper[4769]: I0131 17:10:23.650274 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 17:10:23 crc kubenswrapper[4769]: E0131 17:10:23.771919 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:10:23 crc kubenswrapper[4769]: I0131 17:10:23.990717 4769 generic.go:334] "Generic (PLEG): container finished" podID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerID="8b255e9b67d1ed575196b0f86f7c7a9b343f002ed40dfb1d715798e0a6d44926" exitCode=0 Jan 31 17:10:23 crc kubenswrapper[4769]: I0131 17:10:23.990817 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerDied","Data":"8b255e9b67d1ed575196b0f86f7c7a9b343f002ed40dfb1d715798e0a6d44926"} Jan 31 17:10:23 crc kubenswrapper[4769]: I0131 17:10:23.991049 4769 scope.go:117] "RemoveContainer" containerID="adba7e61ada8b9f946448ad1098624740a7093e4c1be03881d06025667faad12" Jan 31 17:10:23 crc kubenswrapper[4769]: I0131 17:10:23.991768 4769 scope.go:117] "RemoveContainer" containerID="8b255e9b67d1ed575196b0f86f7c7a9b343f002ed40dfb1d715798e0a6d44926" Jan 31 17:10:23 crc kubenswrapper[4769]: I0131 17:10:23.991818 4769 scope.go:117] "RemoveContainer" containerID="50b061844f8b9430593d59652fe6b1c117d7129dd574eb1fbaefa1b4c69e0e76" Jan 31 17:10:23 crc kubenswrapper[4769]: E0131 
17:10:23.992288 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:10:29 crc kubenswrapper[4769]: I0131 17:10:29.708591 4769 scope.go:117] "RemoveContainer" containerID="fb744d21157f411015c8cd1651de19adb19d19b1ce3580e5574de4b8b82236f1" Jan 31 17:10:29 crc kubenswrapper[4769]: E0131 17:10:29.708973 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:10:29 crc kubenswrapper[4769]: I0131 17:10:29.709269 4769 scope.go:117] "RemoveContainer" containerID="faa0b58c5c0e2ceeb6c757f67f5915e2e34792cf9ac39d58f52e22e957346c5a" Jan 31 17:10:29 crc kubenswrapper[4769]: I0131 17:10:29.709342 4769 scope.go:117] "RemoveContainer" containerID="fcfb9e8a85fc333393776cbd0cc376b3645f3bd4e7e2144bcb19b8983c9b32a6" Jan 31 17:10:29 crc kubenswrapper[4769]: I0131 17:10:29.709455 4769 scope.go:117] "RemoveContainer" containerID="0ff9e7c546d2aff57f41fb6c39504569a264d96e9c91dd4200b1d4df6621be8a" Jan 31 17:10:29 crc kubenswrapper[4769]: I0131 17:10:29.709541 4769 scope.go:117] "RemoveContainer" containerID="4fbd8754d6c10e3d907cb1535af0484fdd0e05947a6708d89bc9ac8a31367cd6" Jan 31 17:10:29 crc kubenswrapper[4769]: E0131 17:10:29.709884 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:10:32 crc kubenswrapper[4769]: I0131 17:10:32.719397 4769 scope.go:117] "RemoveContainer" containerID="99495e56c7a32895e31a02ceb548e1efa74689fe78a325d2cac3ed71226951c2" Jan 31 17:10:32 crc kubenswrapper[4769]: I0131 17:10:32.719861 4769 scope.go:117] "RemoveContainer" containerID="67805a171fa50921b299bf9de6a3276969bc5013bd97555c22cdf581a54e8c5e" Jan 31 17:10:32 crc kubenswrapper[4769]: I0131 17:10:32.720061 4769 scope.go:117] 
"RemoveContainer" containerID="72143fdc72a4aeb5d6956e14746c5eefb4688e2a1f8d06687b47ef4b85b8aebd" Jan 31 17:10:32 crc kubenswrapper[4769]: E0131 17:10:32.720663 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:10:33 crc kubenswrapper[4769]: I0131 17:10:33.709027 4769 scope.go:117] "RemoveContainer" containerID="5df4aa2d643db8d794e13ca8e71957995493cf68779403c51bedbd3db5bfcd19" Jan 31 17:10:33 crc kubenswrapper[4769]: I0131 17:10:33.709154 4769 scope.go:117] "RemoveContainer" containerID="90ce8e5db0d511a2adf4d1875136980b24925ce310acb1050c35973fab2d8c03" Jan 31 17:10:33 crc kubenswrapper[4769]: I0131 17:10:33.709332 4769 scope.go:117] "RemoveContainer" containerID="acdc399cdad3416fe47066ca4cbd9d2ba6fff02a1a99c57dc0e67e49c41492fb" Jan 31 17:10:33 crc kubenswrapper[4769]: E0131 17:10:33.709976 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:10:35 crc kubenswrapper[4769]: I0131 17:10:35.708586 4769 scope.go:117] "RemoveContainer" containerID="8b255e9b67d1ed575196b0f86f7c7a9b343f002ed40dfb1d715798e0a6d44926" Jan 31 17:10:35 crc kubenswrapper[4769]: I0131 17:10:35.708623 4769 scope.go:117] "RemoveContainer" containerID="50b061844f8b9430593d59652fe6b1c117d7129dd574eb1fbaefa1b4c69e0e76" Jan 31 17:10:35 crc kubenswrapper[4769]: E0131 17:10:35.709059 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:10:42 crc kubenswrapper[4769]: I0131 17:10:42.713072 4769 scope.go:117] "RemoveContainer" 
containerID="faa0b58c5c0e2ceeb6c757f67f5915e2e34792cf9ac39d58f52e22e957346c5a" Jan 31 17:10:42 crc kubenswrapper[4769]: I0131 17:10:42.713637 4769 scope.go:117] "RemoveContainer" containerID="fcfb9e8a85fc333393776cbd0cc376b3645f3bd4e7e2144bcb19b8983c9b32a6" Jan 31 17:10:42 crc kubenswrapper[4769]: I0131 17:10:42.713843 4769 scope.go:117] "RemoveContainer" containerID="0ff9e7c546d2aff57f41fb6c39504569a264d96e9c91dd4200b1d4df6621be8a" Jan 31 17:10:42 crc kubenswrapper[4769]: I0131 17:10:42.713917 4769 scope.go:117] "RemoveContainer" containerID="4fbd8754d6c10e3d907cb1535af0484fdd0e05947a6708d89bc9ac8a31367cd6" Jan 31 17:10:42 crc kubenswrapper[4769]: E0131 17:10:42.714440 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:10:44 crc kubenswrapper[4769]: I0131 17:10:44.709176 4769 scope.go:117] "RemoveContainer" containerID="fb744d21157f411015c8cd1651de19adb19d19b1ce3580e5574de4b8b82236f1" Jan 31 17:10:44 crc kubenswrapper[4769]: E0131 17:10:44.710088 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:10:47 crc kubenswrapper[4769]: I0131 17:10:47.708869 4769 scope.go:117] "RemoveContainer" containerID="99495e56c7a32895e31a02ceb548e1efa74689fe78a325d2cac3ed71226951c2" Jan 31 17:10:47 crc kubenswrapper[4769]: I0131 17:10:47.708940 4769 scope.go:117] "RemoveContainer" containerID="67805a171fa50921b299bf9de6a3276969bc5013bd97555c22cdf581a54e8c5e" Jan 31 17:10:47 crc kubenswrapper[4769]: I0131 17:10:47.709025 4769 scope.go:117] "RemoveContainer" containerID="72143fdc72a4aeb5d6956e14746c5eefb4688e2a1f8d06687b47ef4b85b8aebd" Jan 31 17:10:47 crc kubenswrapper[4769]: E0131 17:10:47.709280 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with 
CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:10:48 crc kubenswrapper[4769]: I0131 17:10:48.225223 4769 generic.go:334] "Generic (PLEG): container finished" podID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" containerID="ee30a33f60a82d2e1dae52bdc8a34b5c68ccb4fb368981ae183e0dea0860570b" exitCode=1 Jan 31 17:10:48 crc kubenswrapper[4769]: I0131 17:10:48.225692 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerDied","Data":"ee30a33f60a82d2e1dae52bdc8a34b5c68ccb4fb368981ae183e0dea0860570b"} Jan 31 17:10:48 crc kubenswrapper[4769]: I0131 17:10:48.226884 4769 scope.go:117] "RemoveContainer" containerID="99495e56c7a32895e31a02ceb548e1efa74689fe78a325d2cac3ed71226951c2" Jan 31 17:10:48 crc kubenswrapper[4769]: I0131 17:10:48.227018 4769 scope.go:117] "RemoveContainer" containerID="67805a171fa50921b299bf9de6a3276969bc5013bd97555c22cdf581a54e8c5e" Jan 31 17:10:48 crc kubenswrapper[4769]: I0131 17:10:48.227215 4769 scope.go:117] "RemoveContainer" containerID="ee30a33f60a82d2e1dae52bdc8a34b5c68ccb4fb368981ae183e0dea0860570b" Jan 31 17:10:48 crc kubenswrapper[4769]: I0131 17:10:48.227282 4769 scope.go:117] "RemoveContainer" containerID="72143fdc72a4aeb5d6956e14746c5eefb4688e2a1f8d06687b47ef4b85b8aebd" Jan 31 17:10:48 crc kubenswrapper[4769]: E0131 17:10:48.407429 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:10:48 crc kubenswrapper[4769]: I0131 17:10:48.708693 4769 scope.go:117] "RemoveContainer" containerID="5df4aa2d643db8d794e13ca8e71957995493cf68779403c51bedbd3db5bfcd19" Jan 31 17:10:48 crc kubenswrapper[4769]: I0131 17:10:48.708763 4769 scope.go:117] "RemoveContainer" containerID="90ce8e5db0d511a2adf4d1875136980b24925ce310acb1050c35973fab2d8c03" Jan 31 17:10:48 crc kubenswrapper[4769]: I0131 17:10:48.708848 4769 scope.go:117] "RemoveContainer" containerID="acdc399cdad3416fe47066ca4cbd9d2ba6fff02a1a99c57dc0e67e49c41492fb" Jan 31 17:10:48 crc kubenswrapper[4769]: E0131 17:10:48.709101 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for 
\"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:10:48 crc kubenswrapper[4769]: I0131 17:10:48.709632 4769 scope.go:117] "RemoveContainer" containerID="8b255e9b67d1ed575196b0f86f7c7a9b343f002ed40dfb1d715798e0a6d44926" Jan 31 17:10:48 crc kubenswrapper[4769]: I0131 17:10:48.709669 4769 scope.go:117] "RemoveContainer" containerID="50b061844f8b9430593d59652fe6b1c117d7129dd574eb1fbaefa1b4c69e0e76" Jan 31 17:10:48 crc kubenswrapper[4769]: E0131 17:10:48.710353 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:10:49 crc kubenswrapper[4769]: I0131 17:10:49.249044 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerStarted","Data":"70253717258a7c161800c42af8c4f4bd84e9abe68bfbd78aad2db3dae3a77cba"} Jan 31 17:10:49 crc kubenswrapper[4769]: I0131 17:10:49.250604 4769 scope.go:117] "RemoveContainer" containerID="99495e56c7a32895e31a02ceb548e1efa74689fe78a325d2cac3ed71226951c2" Jan 31 17:10:49 crc kubenswrapper[4769]: I0131 17:10:49.250886 4769 scope.go:117] "RemoveContainer" containerID="67805a171fa50921b299bf9de6a3276969bc5013bd97555c22cdf581a54e8c5e" Jan 31 17:10:49 crc kubenswrapper[4769]: I0131 17:10:49.251215 4769 scope.go:117] "RemoveContainer" containerID="72143fdc72a4aeb5d6956e14746c5eefb4688e2a1f8d06687b47ef4b85b8aebd" Jan 31 17:10:49 crc kubenswrapper[4769]: E0131 17:10:49.251920 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:10:53 crc kubenswrapper[4769]: I0131 17:10:53.710043 4769 scope.go:117] "RemoveContainer" containerID="faa0b58c5c0e2ceeb6c757f67f5915e2e34792cf9ac39d58f52e22e957346c5a" Jan 31 17:10:53 crc kubenswrapper[4769]: I0131 17:10:53.711203 4769 scope.go:117] "RemoveContainer" containerID="fcfb9e8a85fc333393776cbd0cc376b3645f3bd4e7e2144bcb19b8983c9b32a6" Jan 31 17:10:53 crc kubenswrapper[4769]: I0131 17:10:53.711430 4769 scope.go:117] "RemoveContainer" containerID="0ff9e7c546d2aff57f41fb6c39504569a264d96e9c91dd4200b1d4df6621be8a" Jan 
31 17:10:53 crc kubenswrapper[4769]: I0131 17:10:53.711551 4769 scope.go:117] "RemoveContainer" containerID="4fbd8754d6c10e3d907cb1535af0484fdd0e05947a6708d89bc9ac8a31367cd6" Jan 31 17:10:53 crc kubenswrapper[4769]: E0131 17:10:53.712192 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:10:55 crc kubenswrapper[4769]: I0131 17:10:55.709369 4769 scope.go:117] "RemoveContainer" containerID="fb744d21157f411015c8cd1651de19adb19d19b1ce3580e5574de4b8b82236f1" Jan 31 17:10:55 crc kubenswrapper[4769]: E0131 17:10:55.709744 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:11:00 crc kubenswrapper[4769]: I0131 17:11:00.709299 4769 scope.go:117] "RemoveContainer" containerID="5df4aa2d643db8d794e13ca8e71957995493cf68779403c51bedbd3db5bfcd19" Jan 31 17:11:00 crc kubenswrapper[4769]: I0131 17:11:00.709768 4769 scope.go:117] "RemoveContainer" containerID="90ce8e5db0d511a2adf4d1875136980b24925ce310acb1050c35973fab2d8c03" Jan 31 17:11:00 crc kubenswrapper[4769]: I0131 17:11:00.709951 4769 scope.go:117] "RemoveContainer" containerID="acdc399cdad3416fe47066ca4cbd9d2ba6fff02a1a99c57dc0e67e49c41492fb" Jan 31 17:11:00 crc kubenswrapper[4769]: I0131 17:11:00.710232 4769 scope.go:117] "RemoveContainer" containerID="99495e56c7a32895e31a02ceb548e1efa74689fe78a325d2cac3ed71226951c2" Jan 31 17:11:00 crc kubenswrapper[4769]: I0131 17:11:00.710394 4769 scope.go:117] "RemoveContainer" containerID="67805a171fa50921b299bf9de6a3276969bc5013bd97555c22cdf581a54e8c5e" Jan 31 17:11:00 crc kubenswrapper[4769]: E0131 17:11:00.710431 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer 
pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:11:00 crc kubenswrapper[4769]: I0131 17:11:00.710680 4769 scope.go:117] "RemoveContainer" containerID="72143fdc72a4aeb5d6956e14746c5eefb4688e2a1f8d06687b47ef4b85b8aebd" Jan 31 17:11:00 crc kubenswrapper[4769]: E0131 17:11:00.711354 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:11:02 crc kubenswrapper[4769]: I0131 17:11:02.714009 4769 scope.go:117] "RemoveContainer" containerID="8b255e9b67d1ed575196b0f86f7c7a9b343f002ed40dfb1d715798e0a6d44926" Jan 31 17:11:02 crc kubenswrapper[4769]: I0131 17:11:02.714043 4769 scope.go:117] "RemoveContainer" containerID="50b061844f8b9430593d59652fe6b1c117d7129dd574eb1fbaefa1b4c69e0e76" Jan 31 17:11:02 crc kubenswrapper[4769]: E0131 17:11:02.714266 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:11:07 crc kubenswrapper[4769]: I0131 17:11:07.709103 4769 scope.go:117] "RemoveContainer" containerID="fb744d21157f411015c8cd1651de19adb19d19b1ce3580e5574de4b8b82236f1" Jan 31 17:11:07 crc kubenswrapper[4769]: E0131 17:11:07.710001 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:11:08 crc kubenswrapper[4769]: I0131 17:11:08.709581 4769 scope.go:117] "RemoveContainer" containerID="faa0b58c5c0e2ceeb6c757f67f5915e2e34792cf9ac39d58f52e22e957346c5a" Jan 31 17:11:08 crc kubenswrapper[4769]: I0131 17:11:08.709770 4769 scope.go:117] "RemoveContainer" containerID="fcfb9e8a85fc333393776cbd0cc376b3645f3bd4e7e2144bcb19b8983c9b32a6" Jan 31 17:11:08 crc kubenswrapper[4769]: I0131 17:11:08.710092 4769 scope.go:117] "RemoveContainer" containerID="0ff9e7c546d2aff57f41fb6c39504569a264d96e9c91dd4200b1d4df6621be8a" Jan 31 17:11:08 crc kubenswrapper[4769]: I0131 17:11:08.710171 4769 scope.go:117] "RemoveContainer" 
containerID="4fbd8754d6c10e3d907cb1535af0484fdd0e05947a6708d89bc9ac8a31367cd6" Jan 31 17:11:08 crc kubenswrapper[4769]: E0131 17:11:08.710749 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:11:13 crc kubenswrapper[4769]: I0131 17:11:13.708707 4769 scope.go:117] "RemoveContainer" containerID="8b255e9b67d1ed575196b0f86f7c7a9b343f002ed40dfb1d715798e0a6d44926" Jan 31 17:11:13 crc kubenswrapper[4769]: I0131 17:11:13.709096 4769 scope.go:117] "RemoveContainer" containerID="50b061844f8b9430593d59652fe6b1c117d7129dd574eb1fbaefa1b4c69e0e76" Jan 31 17:11:13 crc kubenswrapper[4769]: E0131 17:11:13.709617 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:11:15 crc kubenswrapper[4769]: I0131 17:11:15.708371 4769 scope.go:117] "RemoveContainer" containerID="5df4aa2d643db8d794e13ca8e71957995493cf68779403c51bedbd3db5bfcd19" Jan 31 17:11:15 crc kubenswrapper[4769]: I0131 17:11:15.708465 4769 scope.go:117] "RemoveContainer" containerID="90ce8e5db0d511a2adf4d1875136980b24925ce310acb1050c35973fab2d8c03" Jan 31 17:11:15 crc kubenswrapper[4769]: I0131 17:11:15.708627 4769 scope.go:117] "RemoveContainer" containerID="acdc399cdad3416fe47066ca4cbd9d2ba6fff02a1a99c57dc0e67e49c41492fb" Jan 31 17:11:15 crc kubenswrapper[4769]: E0131 17:11:15.708956 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" 
pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:11:15 crc kubenswrapper[4769]: I0131 17:11:15.709253 4769 scope.go:117] "RemoveContainer" containerID="99495e56c7a32895e31a02ceb548e1efa74689fe78a325d2cac3ed71226951c2" Jan 31 17:11:15 crc kubenswrapper[4769]: I0131 17:11:15.709356 4769 scope.go:117] "RemoveContainer" containerID="67805a171fa50921b299bf9de6a3276969bc5013bd97555c22cdf581a54e8c5e" Jan 31 17:11:15 crc kubenswrapper[4769]: I0131 17:11:15.709750 4769 scope.go:117] "RemoveContainer" containerID="72143fdc72a4aeb5d6956e14746c5eefb4688e2a1f8d06687b47ef4b85b8aebd" Jan 31 17:11:15 crc kubenswrapper[4769]: E0131 17:11:15.710357 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:11:19 crc kubenswrapper[4769]: I0131 17:11:19.708245 4769 scope.go:117] "RemoveContainer" containerID="fb744d21157f411015c8cd1651de19adb19d19b1ce3580e5574de4b8b82236f1" Jan 31 17:11:19 crc kubenswrapper[4769]: I0131 17:11:19.709048 4769 scope.go:117] "RemoveContainer" containerID="faa0b58c5c0e2ceeb6c757f67f5915e2e34792cf9ac39d58f52e22e957346c5a" Jan 31 17:11:19 crc kubenswrapper[4769]: E0131 17:11:19.709108 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:11:19 crc kubenswrapper[4769]: I0131 17:11:19.709172 4769 scope.go:117] "RemoveContainer" containerID="fcfb9e8a85fc333393776cbd0cc376b3645f3bd4e7e2144bcb19b8983c9b32a6" Jan 31 17:11:19 crc kubenswrapper[4769]: I0131 17:11:19.709391 4769 scope.go:117] "RemoveContainer" containerID="0ff9e7c546d2aff57f41fb6c39504569a264d96e9c91dd4200b1d4df6621be8a" Jan 31 17:11:19 crc kubenswrapper[4769]: I0131 17:11:19.709457 4769 scope.go:117] "RemoveContainer" containerID="4fbd8754d6c10e3d907cb1535af0484fdd0e05947a6708d89bc9ac8a31367cd6" Jan 31 17:11:19 crc kubenswrapper[4769]: E0131 17:11:19.710027 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s 
restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:11:26 crc kubenswrapper[4769]: I0131 17:11:26.585251 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices\") pod \"swift-ring-rebalance-2sjs2\" (UID: \"54c0116b-a027-4f11-8b6b-aa00778f1acb\") " pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 17:11:26 crc kubenswrapper[4769]: E0131 17:11:26.585446 4769 configmap.go:193] Couldn't get configMap swift-kuttl-tests/swift-ring-config-data: configmap "swift-ring-config-data" not found Jan 31 17:11:26 crc kubenswrapper[4769]: E0131 17:11:26.586243 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices podName:54c0116b-a027-4f11-8b6b-aa00778f1acb nodeName:}" failed. No retries permitted until 2026-01-31 17:13:28.586207115 +0000 UTC m=+2656.660375824 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices") pod "swift-ring-rebalance-2sjs2" (UID: "54c0116b-a027-4f11-8b6b-aa00778f1acb") : configmap "swift-ring-config-data" not found Jan 31 17:11:27 crc kubenswrapper[4769]: I0131 17:11:27.708014 4769 scope.go:117] "RemoveContainer" containerID="8b255e9b67d1ed575196b0f86f7c7a9b343f002ed40dfb1d715798e0a6d44926" Jan 31 17:11:27 crc kubenswrapper[4769]: I0131 17:11:27.708360 4769 scope.go:117] "RemoveContainer" containerID="50b061844f8b9430593d59652fe6b1c117d7129dd574eb1fbaefa1b4c69e0e76" Jan 31 17:11:27 crc kubenswrapper[4769]: E0131 17:11:27.708777 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:11:28 crc kubenswrapper[4769]: I0131 17:11:28.709162 4769 scope.go:117] "RemoveContainer" containerID="5df4aa2d643db8d794e13ca8e71957995493cf68779403c51bedbd3db5bfcd19" Jan 31 17:11:28 crc kubenswrapper[4769]: I0131 17:11:28.709314 4769 scope.go:117] "RemoveContainer" containerID="90ce8e5db0d511a2adf4d1875136980b24925ce310acb1050c35973fab2d8c03" Jan 31 17:11:28 crc kubenswrapper[4769]: I0131 17:11:28.709692 4769 scope.go:117] "RemoveContainer" containerID="acdc399cdad3416fe47066ca4cbd9d2ba6fff02a1a99c57dc0e67e49c41492fb" Jan 31 17:11:28 crc kubenswrapper[4769]: E0131 17:11:28.710222 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator 
pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:11:29 crc kubenswrapper[4769]: I0131 17:11:29.708999 4769 scope.go:117] "RemoveContainer" containerID="99495e56c7a32895e31a02ceb548e1efa74689fe78a325d2cac3ed71226951c2" Jan 31 17:11:29 crc kubenswrapper[4769]: I0131 17:11:29.709098 4769 scope.go:117] "RemoveContainer" containerID="67805a171fa50921b299bf9de6a3276969bc5013bd97555c22cdf581a54e8c5e" Jan 31 17:11:29 crc kubenswrapper[4769]: I0131 17:11:29.709244 4769 scope.go:117] "RemoveContainer" containerID="72143fdc72a4aeb5d6956e14746c5eefb4688e2a1f8d06687b47ef4b85b8aebd" Jan 31 17:11:29 crc kubenswrapper[4769]: E0131 17:11:29.709583 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:11:34 crc kubenswrapper[4769]: I0131 17:11:34.717823 4769 scope.go:117] "RemoveContainer" containerID="fb744d21157f411015c8cd1651de19adb19d19b1ce3580e5574de4b8b82236f1" Jan 31 17:11:34 crc kubenswrapper[4769]: E0131 17:11:34.719678 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:11:34 crc kubenswrapper[4769]: I0131 17:11:34.722095 4769 scope.go:117] "RemoveContainer" containerID="faa0b58c5c0e2ceeb6c757f67f5915e2e34792cf9ac39d58f52e22e957346c5a" Jan 31 17:11:34 crc kubenswrapper[4769]: I0131 17:11:34.722282 4769 scope.go:117] "RemoveContainer" containerID="fcfb9e8a85fc333393776cbd0cc376b3645f3bd4e7e2144bcb19b8983c9b32a6" Jan 31 17:11:34 crc kubenswrapper[4769]: I0131 17:11:34.722876 4769 scope.go:117] "RemoveContainer" containerID="0ff9e7c546d2aff57f41fb6c39504569a264d96e9c91dd4200b1d4df6621be8a" Jan 31 17:11:34 crc kubenswrapper[4769]: I0131 17:11:34.722995 4769 scope.go:117] "RemoveContainer" containerID="4fbd8754d6c10e3d907cb1535af0484fdd0e05947a6708d89bc9ac8a31367cd6" Jan 31 17:11:34 crc kubenswrapper[4769]: E0131 17:11:34.723648 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to 
\"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:11:35 crc kubenswrapper[4769]: E0131 17:11:35.420458 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ring-data-devices], unattached volumes=[], failed to process volumes=[]: context deadline exceeded" pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" podUID="54c0116b-a027-4f11-8b6b-aa00778f1acb" Jan 31 17:11:35 crc kubenswrapper[4769]: I0131 17:11:35.678193 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 17:11:37 crc kubenswrapper[4769]: I0131 17:11:37.712523 4769 generic.go:334] "Generic (PLEG): container finished" podID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" containerID="49ba9b1bc3b013ee758a2682e8108761cf2360872cfe05b4507999708ef72711" exitCode=1 Jan 31 17:11:37 crc kubenswrapper[4769]: I0131 17:11:37.712570 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerDied","Data":"49ba9b1bc3b013ee758a2682e8108761cf2360872cfe05b4507999708ef72711"} Jan 31 17:11:37 crc kubenswrapper[4769]: I0131 17:11:37.713123 4769 scope.go:117] "RemoveContainer" containerID="5df4aa2d643db8d794e13ca8e71957995493cf68779403c51bedbd3db5bfcd19" Jan 31 17:11:37 crc kubenswrapper[4769]: I0131 17:11:37.713196 4769 scope.go:117] "RemoveContainer" containerID="90ce8e5db0d511a2adf4d1875136980b24925ce310acb1050c35973fab2d8c03" Jan 31 17:11:37 crc kubenswrapper[4769]: I0131 17:11:37.713224 4769 scope.go:117] "RemoveContainer" containerID="49ba9b1bc3b013ee758a2682e8108761cf2360872cfe05b4507999708ef72711" Jan 31 17:11:37 crc kubenswrapper[4769]: I0131 17:11:37.713321 4769 scope.go:117] "RemoveContainer" containerID="acdc399cdad3416fe47066ca4cbd9d2ba6fff02a1a99c57dc0e67e49c41492fb" Jan 31 17:11:37 crc kubenswrapper[4769]: E0131 17:11:37.949032 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" 
pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:11:38 crc kubenswrapper[4769]: I0131 17:11:38.731741 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerStarted","Data":"77d09e7c96f692e0f5b97f9b844245c724259d53f5195c1791ff6dccd16b7ace"} Jan 31 17:11:38 crc kubenswrapper[4769]: I0131 17:11:38.733927 4769 scope.go:117] "RemoveContainer" containerID="5df4aa2d643db8d794e13ca8e71957995493cf68779403c51bedbd3db5bfcd19" Jan 31 17:11:38 crc kubenswrapper[4769]: I0131 17:11:38.734202 4769 scope.go:117] "RemoveContainer" containerID="90ce8e5db0d511a2adf4d1875136980b24925ce310acb1050c35973fab2d8c03" Jan 31 17:11:38 crc kubenswrapper[4769]: I0131 17:11:38.734570 4769 scope.go:117] "RemoveContainer" containerID="acdc399cdad3416fe47066ca4cbd9d2ba6fff02a1a99c57dc0e67e49c41492fb" Jan 31 17:11:38 crc kubenswrapper[4769]: E0131 17:11:38.735190 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:11:42 crc kubenswrapper[4769]: I0131 17:11:42.714285 4769 scope.go:117] "RemoveContainer" containerID="8b255e9b67d1ed575196b0f86f7c7a9b343f002ed40dfb1d715798e0a6d44926" Jan 31 17:11:42 crc kubenswrapper[4769]: I0131 17:11:42.714677 4769 scope.go:117] "RemoveContainer" containerID="50b061844f8b9430593d59652fe6b1c117d7129dd574eb1fbaefa1b4c69e0e76" Jan 31 17:11:42 crc kubenswrapper[4769]: E0131 17:11:42.715015 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:11:42 crc kubenswrapper[4769]: I0131 17:11:42.715079 4769 scope.go:117] "RemoveContainer" containerID="99495e56c7a32895e31a02ceb548e1efa74689fe78a325d2cac3ed71226951c2" Jan 31 17:11:42 crc kubenswrapper[4769]: I0131 17:11:42.715202 4769 scope.go:117] "RemoveContainer" containerID="67805a171fa50921b299bf9de6a3276969bc5013bd97555c22cdf581a54e8c5e" Jan 31 17:11:42 crc kubenswrapper[4769]: I0131 17:11:42.715390 4769 scope.go:117] "RemoveContainer" containerID="72143fdc72a4aeb5d6956e14746c5eefb4688e2a1f8d06687b47ef4b85b8aebd" Jan 31 17:11:42 crc kubenswrapper[4769]: E0131 17:11:42.715831 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with 
CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:11:48 crc kubenswrapper[4769]: I0131 17:11:48.708792 4769 scope.go:117] "RemoveContainer" containerID="faa0b58c5c0e2ceeb6c757f67f5915e2e34792cf9ac39d58f52e22e957346c5a" Jan 31 17:11:48 crc kubenswrapper[4769]: I0131 17:11:48.709469 4769 scope.go:117] "RemoveContainer" containerID="fcfb9e8a85fc333393776cbd0cc376b3645f3bd4e7e2144bcb19b8983c9b32a6" Jan 31 17:11:48 crc kubenswrapper[4769]: I0131 17:11:48.709677 4769 scope.go:117] "RemoveContainer" containerID="0ff9e7c546d2aff57f41fb6c39504569a264d96e9c91dd4200b1d4df6621be8a" Jan 31 17:11:48 crc kubenswrapper[4769]: I0131 17:11:48.709746 4769 scope.go:117] "RemoveContainer" containerID="4fbd8754d6c10e3d907cb1535af0484fdd0e05947a6708d89bc9ac8a31367cd6" Jan 31 17:11:48 crc kubenswrapper[4769]: E0131 17:11:48.710248 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:11:49 crc kubenswrapper[4769]: I0131 17:11:49.708088 4769 scope.go:117] "RemoveContainer" containerID="fb744d21157f411015c8cd1651de19adb19d19b1ce3580e5574de4b8b82236f1" Jan 31 17:11:49 crc kubenswrapper[4769]: E0131 17:11:49.708451 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:11:51 crc kubenswrapper[4769]: I0131 17:11:51.708489 4769 scope.go:117] "RemoveContainer" containerID="5df4aa2d643db8d794e13ca8e71957995493cf68779403c51bedbd3db5bfcd19" Jan 31 17:11:51 crc kubenswrapper[4769]: I0131 17:11:51.708939 4769 scope.go:117] "RemoveContainer" containerID="90ce8e5db0d511a2adf4d1875136980b24925ce310acb1050c35973fab2d8c03" Jan 31 17:11:51 crc 
kubenswrapper[4769]: I0131 17:11:51.709133 4769 scope.go:117] "RemoveContainer" containerID="acdc399cdad3416fe47066ca4cbd9d2ba6fff02a1a99c57dc0e67e49c41492fb" Jan 31 17:11:51 crc kubenswrapper[4769]: E0131 17:11:51.709664 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:11:54 crc kubenswrapper[4769]: I0131 17:11:54.710268 4769 scope.go:117] "RemoveContainer" containerID="99495e56c7a32895e31a02ceb548e1efa74689fe78a325d2cac3ed71226951c2" Jan 31 17:11:54 crc kubenswrapper[4769]: I0131 17:11:54.710808 4769 scope.go:117] "RemoveContainer" containerID="67805a171fa50921b299bf9de6a3276969bc5013bd97555c22cdf581a54e8c5e" Jan 31 17:11:54 crc kubenswrapper[4769]: I0131 17:11:54.710996 4769 scope.go:117] "RemoveContainer" containerID="72143fdc72a4aeb5d6956e14746c5eefb4688e2a1f8d06687b47ef4b85b8aebd" Jan 31 17:11:54 crc kubenswrapper[4769]: E0131 17:11:54.711457 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:11:56 crc kubenswrapper[4769]: I0131 17:11:56.708714 4769 scope.go:117] "RemoveContainer" containerID="8b255e9b67d1ed575196b0f86f7c7a9b343f002ed40dfb1d715798e0a6d44926" Jan 31 17:11:56 crc kubenswrapper[4769]: I0131 17:11:56.708997 4769 scope.go:117] "RemoveContainer" containerID="50b061844f8b9430593d59652fe6b1c117d7129dd574eb1fbaefa1b4c69e0e76" Jan 31 17:11:56 crc kubenswrapper[4769]: E0131 17:11:56.709219 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:11:59 crc kubenswrapper[4769]: I0131 17:11:59.709359 4769 scope.go:117] 
"RemoveContainer" containerID="faa0b58c5c0e2ceeb6c757f67f5915e2e34792cf9ac39d58f52e22e957346c5a" Jan 31 17:11:59 crc kubenswrapper[4769]: I0131 17:11:59.709525 4769 scope.go:117] "RemoveContainer" containerID="fcfb9e8a85fc333393776cbd0cc376b3645f3bd4e7e2144bcb19b8983c9b32a6" Jan 31 17:11:59 crc kubenswrapper[4769]: I0131 17:11:59.709748 4769 scope.go:117] "RemoveContainer" containerID="0ff9e7c546d2aff57f41fb6c39504569a264d96e9c91dd4200b1d4df6621be8a" Jan 31 17:11:59 crc kubenswrapper[4769]: I0131 17:11:59.709818 4769 scope.go:117] "RemoveContainer" containerID="4fbd8754d6c10e3d907cb1535af0484fdd0e05947a6708d89bc9ac8a31367cd6" Jan 31 17:11:59 crc kubenswrapper[4769]: E0131 17:11:59.710467 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:12:01 crc kubenswrapper[4769]: I0131 17:12:01.708787 4769 scope.go:117] "RemoveContainer" containerID="fb744d21157f411015c8cd1651de19adb19d19b1ce3580e5574de4b8b82236f1" Jan 31 17:12:01 crc kubenswrapper[4769]: E0131 17:12:01.709238 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:12:02 crc kubenswrapper[4769]: I0131 17:12:02.716023 4769 scope.go:117] "RemoveContainer" containerID="5df4aa2d643db8d794e13ca8e71957995493cf68779403c51bedbd3db5bfcd19" Jan 31 17:12:02 crc kubenswrapper[4769]: I0131 17:12:02.716131 4769 scope.go:117] "RemoveContainer" containerID="90ce8e5db0d511a2adf4d1875136980b24925ce310acb1050c35973fab2d8c03" Jan 31 17:12:02 crc kubenswrapper[4769]: I0131 17:12:02.716280 4769 scope.go:117] "RemoveContainer" containerID="acdc399cdad3416fe47066ca4cbd9d2ba6fff02a1a99c57dc0e67e49c41492fb" Jan 31 17:12:02 crc kubenswrapper[4769]: E0131 17:12:02.716636 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for 
\"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:12:06 crc kubenswrapper[4769]: I0131 17:12:06.709068 4769 scope.go:117] "RemoveContainer" containerID="99495e56c7a32895e31a02ceb548e1efa74689fe78a325d2cac3ed71226951c2" Jan 31 17:12:06 crc kubenswrapper[4769]: I0131 17:12:06.709580 4769 scope.go:117] "RemoveContainer" containerID="67805a171fa50921b299bf9de6a3276969bc5013bd97555c22cdf581a54e8c5e" Jan 31 17:12:06 crc kubenswrapper[4769]: I0131 17:12:06.709761 4769 scope.go:117] "RemoveContainer" containerID="72143fdc72a4aeb5d6956e14746c5eefb4688e2a1f8d06687b47ef4b85b8aebd" Jan 31 17:12:06 crc kubenswrapper[4769]: E0131 17:12:06.710233 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:12:09 crc kubenswrapper[4769]: I0131 17:12:09.023818 4769 generic.go:334] "Generic (PLEG): container finished" podID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" containerID="77d09e7c96f692e0f5b97f9b844245c724259d53f5195c1791ff6dccd16b7ace" exitCode=1 Jan 31 17:12:09 crc kubenswrapper[4769]: I0131 17:12:09.023902 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerDied","Data":"77d09e7c96f692e0f5b97f9b844245c724259d53f5195c1791ff6dccd16b7ace"} Jan 31 17:12:09 crc kubenswrapper[4769]: I0131 17:12:09.024275 4769 scope.go:117] "RemoveContainer" containerID="49ba9b1bc3b013ee758a2682e8108761cf2360872cfe05b4507999708ef72711" Jan 31 17:12:09 crc kubenswrapper[4769]: I0131 17:12:09.025332 4769 scope.go:117] "RemoveContainer" containerID="5df4aa2d643db8d794e13ca8e71957995493cf68779403c51bedbd3db5bfcd19" Jan 31 17:12:09 crc kubenswrapper[4769]: I0131 17:12:09.025470 4769 scope.go:117] "RemoveContainer" containerID="90ce8e5db0d511a2adf4d1875136980b24925ce310acb1050c35973fab2d8c03" Jan 31 17:12:09 crc kubenswrapper[4769]: I0131 17:12:09.025537 4769 scope.go:117] "RemoveContainer" containerID="77d09e7c96f692e0f5b97f9b844245c724259d53f5195c1791ff6dccd16b7ace" Jan 31 17:12:09 crc kubenswrapper[4769]: I0131 17:12:09.025680 4769 scope.go:117] "RemoveContainer" containerID="acdc399cdad3416fe47066ca4cbd9d2ba6fff02a1a99c57dc0e67e49c41492fb" Jan 31 17:12:09 crc kubenswrapper[4769]: E0131 17:12:09.026231 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with 
CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 10s restarting failed container=container-updater pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:12:10 crc kubenswrapper[4769]: I0131 17:12:10.709239 4769 scope.go:117] "RemoveContainer" containerID="8b255e9b67d1ed575196b0f86f7c7a9b343f002ed40dfb1d715798e0a6d44926" Jan 31 17:12:10 crc kubenswrapper[4769]: I0131 17:12:10.709285 4769 scope.go:117] "RemoveContainer" containerID="50b061844f8b9430593d59652fe6b1c117d7129dd574eb1fbaefa1b4c69e0e76" Jan 31 17:12:10 crc kubenswrapper[4769]: E0131 17:12:10.709739 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:12:12 crc kubenswrapper[4769]: I0131 17:12:12.718593 4769 scope.go:117] "RemoveContainer" containerID="faa0b58c5c0e2ceeb6c757f67f5915e2e34792cf9ac39d58f52e22e957346c5a" Jan 31 17:12:12 crc kubenswrapper[4769]: I0131 17:12:12.718908 4769 scope.go:117] "RemoveContainer" containerID="fcfb9e8a85fc333393776cbd0cc376b3645f3bd4e7e2144bcb19b8983c9b32a6" Jan 31 17:12:12 crc kubenswrapper[4769]: I0131 17:12:12.719007 4769 scope.go:117] "RemoveContainer" containerID="0ff9e7c546d2aff57f41fb6c39504569a264d96e9c91dd4200b1d4df6621be8a" Jan 31 17:12:12 crc kubenswrapper[4769]: I0131 17:12:12.719047 4769 scope.go:117] "RemoveContainer" containerID="4fbd8754d6c10e3d907cb1535af0484fdd0e05947a6708d89bc9ac8a31367cd6" Jan 31 17:12:12 crc kubenswrapper[4769]: E0131 17:12:12.719332 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:12:14 crc kubenswrapper[4769]: 
I0131 17:12:14.709487 4769 scope.go:117] "RemoveContainer" containerID="fb744d21157f411015c8cd1651de19adb19d19b1ce3580e5574de4b8b82236f1" Jan 31 17:12:14 crc kubenswrapper[4769]: E0131 17:12:14.710378 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:12:19 crc kubenswrapper[4769]: I0131 17:12:19.709274 4769 scope.go:117] "RemoveContainer" containerID="99495e56c7a32895e31a02ceb548e1efa74689fe78a325d2cac3ed71226951c2" Jan 31 17:12:19 crc kubenswrapper[4769]: I0131 17:12:19.709737 4769 scope.go:117] "RemoveContainer" containerID="67805a171fa50921b299bf9de6a3276969bc5013bd97555c22cdf581a54e8c5e" Jan 31 17:12:19 crc kubenswrapper[4769]: I0131 17:12:19.709929 4769 scope.go:117] "RemoveContainer" containerID="72143fdc72a4aeb5d6956e14746c5eefb4688e2a1f8d06687b47ef4b85b8aebd" Jan 31 17:12:19 crc kubenswrapper[4769]: E0131 17:12:19.710464 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:12:23 crc kubenswrapper[4769]: I0131 17:12:23.708367 4769 scope.go:117] "RemoveContainer" containerID="5df4aa2d643db8d794e13ca8e71957995493cf68779403c51bedbd3db5bfcd19" Jan 31 17:12:23 crc kubenswrapper[4769]: I0131 17:12:23.708823 4769 scope.go:117] "RemoveContainer" containerID="90ce8e5db0d511a2adf4d1875136980b24925ce310acb1050c35973fab2d8c03" Jan 31 17:12:23 crc kubenswrapper[4769]: I0131 17:12:23.708868 4769 scope.go:117] "RemoveContainer" containerID="77d09e7c96f692e0f5b97f9b844245c724259d53f5195c1791ff6dccd16b7ace" Jan 31 17:12:23 crc kubenswrapper[4769]: I0131 17:12:23.708991 4769 scope.go:117] "RemoveContainer" containerID="acdc399cdad3416fe47066ca4cbd9d2ba6fff02a1a99c57dc0e67e49c41492fb" Jan 31 17:12:23 crc kubenswrapper[4769]: E0131 17:12:23.923628 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" 
pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:12:24 crc kubenswrapper[4769]: I0131 17:12:24.194724 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerStarted","Data":"b05cfd3993f9609da7c0b1896e627e3b448ea96c8859fbfbb4e6331cd4667933"} Jan 31 17:12:24 crc kubenswrapper[4769]: I0131 17:12:24.195958 4769 scope.go:117] "RemoveContainer" containerID="5df4aa2d643db8d794e13ca8e71957995493cf68779403c51bedbd3db5bfcd19" Jan 31 17:12:24 crc kubenswrapper[4769]: I0131 17:12:24.196077 4769 scope.go:117] "RemoveContainer" containerID="90ce8e5db0d511a2adf4d1875136980b24925ce310acb1050c35973fab2d8c03" Jan 31 17:12:24 crc kubenswrapper[4769]: I0131 17:12:24.196429 4769 scope.go:117] "RemoveContainer" containerID="acdc399cdad3416fe47066ca4cbd9d2ba6fff02a1a99c57dc0e67e49c41492fb" Jan 31 17:12:24 crc kubenswrapper[4769]: E0131 17:12:24.197735 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:12:24 crc kubenswrapper[4769]: I0131 17:12:24.708579 4769 scope.go:117] "RemoveContainer" containerID="faa0b58c5c0e2ceeb6c757f67f5915e2e34792cf9ac39d58f52e22e957346c5a" Jan 31 17:12:24 crc kubenswrapper[4769]: I0131 17:12:24.709019 4769 scope.go:117] "RemoveContainer" containerID="fcfb9e8a85fc333393776cbd0cc376b3645f3bd4e7e2144bcb19b8983c9b32a6" Jan 31 17:12:24 crc kubenswrapper[4769]: I0131 17:12:24.709105 4769 scope.go:117] "RemoveContainer" containerID="0ff9e7c546d2aff57f41fb6c39504569a264d96e9c91dd4200b1d4df6621be8a" Jan 31 17:12:24 crc kubenswrapper[4769]: I0131 17:12:24.709191 4769 scope.go:117] "RemoveContainer" containerID="4fbd8754d6c10e3d907cb1535af0484fdd0e05947a6708d89bc9ac8a31367cd6" Jan 31 17:12:24 crc kubenswrapper[4769]: E0131 17:12:24.709523 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" 
pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:12:25 crc kubenswrapper[4769]: I0131 17:12:25.708442 4769 scope.go:117] "RemoveContainer" containerID="8b255e9b67d1ed575196b0f86f7c7a9b343f002ed40dfb1d715798e0a6d44926" Jan 31 17:12:25 crc kubenswrapper[4769]: I0131 17:12:25.708487 4769 scope.go:117] "RemoveContainer" containerID="50b061844f8b9430593d59652fe6b1c117d7129dd574eb1fbaefa1b4c69e0e76" Jan 31 17:12:25 crc kubenswrapper[4769]: E0131 17:12:25.708783 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:12:26 crc kubenswrapper[4769]: I0131 17:12:26.708697 4769 scope.go:117] "RemoveContainer" containerID="fb744d21157f411015c8cd1651de19adb19d19b1ce3580e5574de4b8b82236f1" Jan 31 17:12:27 crc kubenswrapper[4769]: I0131 17:12:27.222314 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" event={"ID":"1d352f75-43f7-4b8c-867e-cfb17bbbe011","Type":"ContainerStarted","Data":"6e828f88c834e15dfb7cfff730fff4effd93e0ba9e72273ac0004887e05469f3"} Jan 31 17:12:34 crc kubenswrapper[4769]: I0131 17:12:34.708297 4769 scope.go:117] "RemoveContainer" containerID="99495e56c7a32895e31a02ceb548e1efa74689fe78a325d2cac3ed71226951c2" Jan 31 17:12:34 crc kubenswrapper[4769]: I0131 17:12:34.708887 4769 scope.go:117] "RemoveContainer" containerID="67805a171fa50921b299bf9de6a3276969bc5013bd97555c22cdf581a54e8c5e" Jan 31 17:12:34 crc kubenswrapper[4769]: I0131 17:12:34.708992 4769 scope.go:117] "RemoveContainer" containerID="72143fdc72a4aeb5d6956e14746c5eefb4688e2a1f8d06687b47ef4b85b8aebd" Jan 31 17:12:34 crc kubenswrapper[4769]: E0131 17:12:34.709279 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:12:37 crc kubenswrapper[4769]: I0131 17:12:37.708902 4769 scope.go:117] "RemoveContainer" containerID="faa0b58c5c0e2ceeb6c757f67f5915e2e34792cf9ac39d58f52e22e957346c5a" Jan 31 17:12:37 crc kubenswrapper[4769]: I0131 17:12:37.709607 4769 scope.go:117] "RemoveContainer" containerID="fcfb9e8a85fc333393776cbd0cc376b3645f3bd4e7e2144bcb19b8983c9b32a6" Jan 31 17:12:37 crc kubenswrapper[4769]: I0131 17:12:37.709790 4769 scope.go:117] "RemoveContainer" 
containerID="0ff9e7c546d2aff57f41fb6c39504569a264d96e9c91dd4200b1d4df6621be8a" Jan 31 17:12:37 crc kubenswrapper[4769]: I0131 17:12:37.709856 4769 scope.go:117] "RemoveContainer" containerID="4fbd8754d6c10e3d907cb1535af0484fdd0e05947a6708d89bc9ac8a31367cd6" Jan 31 17:12:37 crc kubenswrapper[4769]: E0131 17:12:37.710523 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:12:38 crc kubenswrapper[4769]: I0131 17:12:38.709001 4769 scope.go:117] "RemoveContainer" containerID="5df4aa2d643db8d794e13ca8e71957995493cf68779403c51bedbd3db5bfcd19" Jan 31 17:12:38 crc kubenswrapper[4769]: I0131 17:12:38.709141 4769 scope.go:117] "RemoveContainer" containerID="90ce8e5db0d511a2adf4d1875136980b24925ce310acb1050c35973fab2d8c03" Jan 31 17:12:38 crc kubenswrapper[4769]: I0131 17:12:38.709359 4769 scope.go:117] "RemoveContainer" containerID="acdc399cdad3416fe47066ca4cbd9d2ba6fff02a1a99c57dc0e67e49c41492fb" Jan 31 17:12:38 crc kubenswrapper[4769]: E0131 17:12:38.709990 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:12:39 crc kubenswrapper[4769]: I0131 17:12:39.708257 4769 scope.go:117] "RemoveContainer" containerID="8b255e9b67d1ed575196b0f86f7c7a9b343f002ed40dfb1d715798e0a6d44926" Jan 31 17:12:39 crc kubenswrapper[4769]: I0131 17:12:39.708286 4769 scope.go:117] "RemoveContainer" containerID="50b061844f8b9430593d59652fe6b1c117d7129dd574eb1fbaefa1b4c69e0e76" Jan 31 17:12:39 crc kubenswrapper[4769]: E0131 17:12:39.708521 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s 
restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:12:49 crc kubenswrapper[4769]: I0131 17:12:49.709044 4769 scope.go:117] "RemoveContainer" containerID="99495e56c7a32895e31a02ceb548e1efa74689fe78a325d2cac3ed71226951c2" Jan 31 17:12:49 crc kubenswrapper[4769]: I0131 17:12:49.709742 4769 scope.go:117] "RemoveContainer" containerID="67805a171fa50921b299bf9de6a3276969bc5013bd97555c22cdf581a54e8c5e" Jan 31 17:12:49 crc kubenswrapper[4769]: I0131 17:12:49.709996 4769 scope.go:117] "RemoveContainer" containerID="72143fdc72a4aeb5d6956e14746c5eefb4688e2a1f8d06687b47ef4b85b8aebd" Jan 31 17:12:50 crc kubenswrapper[4769]: I0131 17:12:50.416031 4769 generic.go:334] "Generic (PLEG): container finished" podID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" containerID="634222e06565f9a8960db793ad16c4794ee090ac4d43ace186f3daf20b881cd9" exitCode=1 Jan 31 17:12:50 crc kubenswrapper[4769]: I0131 17:12:50.416094 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerStarted","Data":"24a94dae6f7672de022836fe383e9a1df16a07423b4ffed3c01db43a4d952d98"} Jan 31 17:12:50 crc kubenswrapper[4769]: I0131 17:12:50.416369 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerStarted","Data":"0dd9545c12ba7121dbfacc14fa99efe6c52da5daa3b0c89a71e5af63f8c99eda"} Jan 31 17:12:50 crc kubenswrapper[4769]: I0131 17:12:50.416380 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerDied","Data":"634222e06565f9a8960db793ad16c4794ee090ac4d43ace186f3daf20b881cd9"} Jan 31 17:12:50 crc kubenswrapper[4769]: I0131 17:12:50.416398 4769 scope.go:117] "RemoveContainer" containerID="99495e56c7a32895e31a02ceb548e1efa74689fe78a325d2cac3ed71226951c2" Jan 31 17:12:50 crc kubenswrapper[4769]: I0131 17:12:50.417329 4769 scope.go:117] "RemoveContainer" containerID="634222e06565f9a8960db793ad16c4794ee090ac4d43ace186f3daf20b881cd9" Jan 31 17:12:50 crc kubenswrapper[4769]: E0131 17:12:50.424588 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:12:50 crc kubenswrapper[4769]: I0131 17:12:50.710422 4769 scope.go:117] "RemoveContainer" containerID="faa0b58c5c0e2ceeb6c757f67f5915e2e34792cf9ac39d58f52e22e957346c5a" Jan 31 17:12:50 crc kubenswrapper[4769]: I0131 17:12:50.710482 4769 scope.go:117] "RemoveContainer" containerID="fcfb9e8a85fc333393776cbd0cc376b3645f3bd4e7e2144bcb19b8983c9b32a6" Jan 31 17:12:50 crc kubenswrapper[4769]: I0131 17:12:50.710578 4769 scope.go:117] "RemoveContainer" containerID="0ff9e7c546d2aff57f41fb6c39504569a264d96e9c91dd4200b1d4df6621be8a" Jan 31 17:12:50 crc kubenswrapper[4769]: I0131 17:12:50.710608 4769 scope.go:117] "RemoveContainer" containerID="4fbd8754d6c10e3d907cb1535af0484fdd0e05947a6708d89bc9ac8a31367cd6" Jan 31 17:12:50 crc kubenswrapper[4769]: E0131 17:12:50.710817 4769 pod_workers.go:1301] "Error 
syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:12:51 crc kubenswrapper[4769]: I0131 17:12:51.470408 4769 generic.go:334] "Generic (PLEG): container finished" podID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" containerID="24a94dae6f7672de022836fe383e9a1df16a07423b4ffed3c01db43a4d952d98" exitCode=1 Jan 31 17:12:51 crc kubenswrapper[4769]: I0131 17:12:51.470448 4769 generic.go:334] "Generic (PLEG): container finished" podID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" containerID="0dd9545c12ba7121dbfacc14fa99efe6c52da5daa3b0c89a71e5af63f8c99eda" exitCode=1 Jan 31 17:12:51 crc kubenswrapper[4769]: I0131 17:12:51.470488 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerDied","Data":"24a94dae6f7672de022836fe383e9a1df16a07423b4ffed3c01db43a4d952d98"} Jan 31 17:12:51 crc kubenswrapper[4769]: I0131 17:12:51.470551 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerDied","Data":"0dd9545c12ba7121dbfacc14fa99efe6c52da5daa3b0c89a71e5af63f8c99eda"} Jan 31 17:12:51 crc kubenswrapper[4769]: I0131 17:12:51.470576 4769 scope.go:117] "RemoveContainer" containerID="72143fdc72a4aeb5d6956e14746c5eefb4688e2a1f8d06687b47ef4b85b8aebd" Jan 31 17:12:51 crc kubenswrapper[4769]: I0131 17:12:51.471325 4769 scope.go:117] "RemoveContainer" containerID="634222e06565f9a8960db793ad16c4794ee090ac4d43ace186f3daf20b881cd9" Jan 31 17:12:51 crc kubenswrapper[4769]: I0131 17:12:51.471404 4769 scope.go:117] "RemoveContainer" containerID="0dd9545c12ba7121dbfacc14fa99efe6c52da5daa3b0c89a71e5af63f8c99eda" Jan 31 17:12:51 crc kubenswrapper[4769]: I0131 17:12:51.471562 4769 scope.go:117] "RemoveContainer" containerID="24a94dae6f7672de022836fe383e9a1df16a07423b4ffed3c01db43a4d952d98" Jan 31 17:12:51 crc kubenswrapper[4769]: E0131 17:12:51.471902 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer 
pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:12:51 crc kubenswrapper[4769]: I0131 17:12:51.539619 4769 scope.go:117] "RemoveContainer" containerID="67805a171fa50921b299bf9de6a3276969bc5013bd97555c22cdf581a54e8c5e" Jan 31 17:12:51 crc kubenswrapper[4769]: I0131 17:12:51.709468 4769 scope.go:117] "RemoveContainer" containerID="5df4aa2d643db8d794e13ca8e71957995493cf68779403c51bedbd3db5bfcd19" Jan 31 17:12:51 crc kubenswrapper[4769]: I0131 17:12:51.709561 4769 scope.go:117] "RemoveContainer" containerID="90ce8e5db0d511a2adf4d1875136980b24925ce310acb1050c35973fab2d8c03" Jan 31 17:12:51 crc kubenswrapper[4769]: I0131 17:12:51.709645 4769 scope.go:117] "RemoveContainer" containerID="acdc399cdad3416fe47066ca4cbd9d2ba6fff02a1a99c57dc0e67e49c41492fb" Jan 31 17:12:52 crc kubenswrapper[4769]: I0131 17:12:52.516140 4769 generic.go:334] "Generic (PLEG): container finished" podID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" containerID="014b73c8ef938632d42113db88d83d9c9af346c48d23501738b4fe440f72b44f" exitCode=1 Jan 31 17:12:52 crc kubenswrapper[4769]: I0131 17:12:52.516365 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerStarted","Data":"02962f2b97310c8a4c458064c36a004fdb8bf734f79a6cefa9903589110e2751"} Jan 31 17:12:52 crc kubenswrapper[4769]: I0131 17:12:52.516717 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerStarted","Data":"6cf22fe5662840a8c5cb4e5929a4fb93dc807d17766ba53f099e7b3a63f63ef8"} Jan 31 17:12:52 crc kubenswrapper[4769]: I0131 17:12:52.516745 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerDied","Data":"014b73c8ef938632d42113db88d83d9c9af346c48d23501738b4fe440f72b44f"} Jan 31 17:12:52 crc kubenswrapper[4769]: I0131 17:12:52.516771 4769 scope.go:117] "RemoveContainer" containerID="5df4aa2d643db8d794e13ca8e71957995493cf68779403c51bedbd3db5bfcd19" Jan 31 17:12:52 crc kubenswrapper[4769]: I0131 17:12:52.517405 4769 scope.go:117] "RemoveContainer" containerID="014b73c8ef938632d42113db88d83d9c9af346c48d23501738b4fe440f72b44f" Jan 31 17:12:52 crc kubenswrapper[4769]: E0131 17:12:52.517842 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:12:53 crc kubenswrapper[4769]: I0131 17:12:53.552919 4769 generic.go:334] "Generic (PLEG): container finished" podID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" containerID="02962f2b97310c8a4c458064c36a004fdb8bf734f79a6cefa9903589110e2751" exitCode=1 Jan 31 17:12:53 crc kubenswrapper[4769]: I0131 17:12:53.553222 4769 generic.go:334] "Generic (PLEG): container finished" podID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" containerID="6cf22fe5662840a8c5cb4e5929a4fb93dc807d17766ba53f099e7b3a63f63ef8" exitCode=1 Jan 31 17:12:53 crc kubenswrapper[4769]: I0131 17:12:53.553064 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" 
event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerDied","Data":"02962f2b97310c8a4c458064c36a004fdb8bf734f79a6cefa9903589110e2751"} Jan 31 17:12:53 crc kubenswrapper[4769]: I0131 17:12:53.553260 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerDied","Data":"6cf22fe5662840a8c5cb4e5929a4fb93dc807d17766ba53f099e7b3a63f63ef8"} Jan 31 17:12:53 crc kubenswrapper[4769]: I0131 17:12:53.553283 4769 scope.go:117] "RemoveContainer" containerID="acdc399cdad3416fe47066ca4cbd9d2ba6fff02a1a99c57dc0e67e49c41492fb" Jan 31 17:12:53 crc kubenswrapper[4769]: I0131 17:12:53.553980 4769 scope.go:117] "RemoveContainer" containerID="014b73c8ef938632d42113db88d83d9c9af346c48d23501738b4fe440f72b44f" Jan 31 17:12:53 crc kubenswrapper[4769]: I0131 17:12:53.554044 4769 scope.go:117] "RemoveContainer" containerID="6cf22fe5662840a8c5cb4e5929a4fb93dc807d17766ba53f099e7b3a63f63ef8" Jan 31 17:12:53 crc kubenswrapper[4769]: I0131 17:12:53.554141 4769 scope.go:117] "RemoveContainer" containerID="02962f2b97310c8a4c458064c36a004fdb8bf734f79a6cefa9903589110e2751" Jan 31 17:12:53 crc kubenswrapper[4769]: E0131 17:12:53.554403 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:12:53 crc kubenswrapper[4769]: I0131 17:12:53.597210 4769 scope.go:117] "RemoveContainer" containerID="90ce8e5db0d511a2adf4d1875136980b24925ce310acb1050c35973fab2d8c03" Jan 31 17:12:53 crc kubenswrapper[4769]: I0131 17:12:53.707835 4769 scope.go:117] "RemoveContainer" containerID="8b255e9b67d1ed575196b0f86f7c7a9b343f002ed40dfb1d715798e0a6d44926" Jan 31 17:12:53 crc kubenswrapper[4769]: I0131 17:12:53.707867 4769 scope.go:117] "RemoveContainer" containerID="50b061844f8b9430593d59652fe6b1c117d7129dd574eb1fbaefa1b4c69e0e76" Jan 31 17:12:53 crc kubenswrapper[4769]: E0131 17:12:53.708119 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:13:04 crc kubenswrapper[4769]: I0131 17:13:04.711879 4769 scope.go:117] "RemoveContainer" containerID="634222e06565f9a8960db793ad16c4794ee090ac4d43ace186f3daf20b881cd9" Jan 31 17:13:04 crc kubenswrapper[4769]: I0131 17:13:04.713422 4769 scope.go:117] "RemoveContainer" 
containerID="0dd9545c12ba7121dbfacc14fa99efe6c52da5daa3b0c89a71e5af63f8c99eda" Jan 31 17:13:04 crc kubenswrapper[4769]: I0131 17:13:04.713636 4769 scope.go:117] "RemoveContainer" containerID="24a94dae6f7672de022836fe383e9a1df16a07423b4ffed3c01db43a4d952d98" Jan 31 17:13:04 crc kubenswrapper[4769]: E0131 17:13:04.714084 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:13:04 crc kubenswrapper[4769]: I0131 17:13:04.714435 4769 scope.go:117] "RemoveContainer" containerID="014b73c8ef938632d42113db88d83d9c9af346c48d23501738b4fe440f72b44f" Jan 31 17:13:04 crc kubenswrapper[4769]: I0131 17:13:04.714612 4769 scope.go:117] "RemoveContainer" containerID="6cf22fe5662840a8c5cb4e5929a4fb93dc807d17766ba53f099e7b3a63f63ef8" Jan 31 17:13:04 crc kubenswrapper[4769]: I0131 17:13:04.714806 4769 scope.go:117] "RemoveContainer" containerID="02962f2b97310c8a4c458064c36a004fdb8bf734f79a6cefa9903589110e2751" Jan 31 17:13:04 crc kubenswrapper[4769]: E0131 17:13:04.715347 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:13:05 crc kubenswrapper[4769]: I0131 17:13:05.708612 4769 scope.go:117] "RemoveContainer" containerID="faa0b58c5c0e2ceeb6c757f67f5915e2e34792cf9ac39d58f52e22e957346c5a" Jan 31 17:13:05 crc kubenswrapper[4769]: I0131 17:13:05.708951 4769 scope.go:117] "RemoveContainer" containerID="fcfb9e8a85fc333393776cbd0cc376b3645f3bd4e7e2144bcb19b8983c9b32a6" Jan 31 17:13:05 crc kubenswrapper[4769]: I0131 17:13:05.709034 4769 scope.go:117] "RemoveContainer" containerID="0ff9e7c546d2aff57f41fb6c39504569a264d96e9c91dd4200b1d4df6621be8a" Jan 31 17:13:05 crc kubenswrapper[4769]: I0131 17:13:05.709067 4769 scope.go:117] "RemoveContainer" containerID="4fbd8754d6c10e3d907cb1535af0484fdd0e05947a6708d89bc9ac8a31367cd6" Jan 31 17:13:05 crc kubenswrapper[4769]: E0131 17:13:05.709324 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator 
pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:13:08 crc kubenswrapper[4769]: I0131 17:13:08.708105 4769 scope.go:117] "RemoveContainer" containerID="8b255e9b67d1ed575196b0f86f7c7a9b343f002ed40dfb1d715798e0a6d44926" Jan 31 17:13:08 crc kubenswrapper[4769]: I0131 17:13:08.708672 4769 scope.go:117] "RemoveContainer" containerID="50b061844f8b9430593d59652fe6b1c117d7129dd574eb1fbaefa1b4c69e0e76" Jan 31 17:13:08 crc kubenswrapper[4769]: E0131 17:13:08.930935 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:13:09 crc kubenswrapper[4769]: I0131 17:13:09.232959 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-9jdkd"] Jan 31 17:13:09 crc kubenswrapper[4769]: I0131 17:13:09.235630 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-9jdkd" Jan 31 17:13:09 crc kubenswrapper[4769]: I0131 17:13:09.251038 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-9jdkd"] Jan 31 17:13:09 crc kubenswrapper[4769]: I0131 17:13:09.327043 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0866f9dd-f54e-4ab4-86c7-9dc3a277a544-catalog-content\") pod \"community-operators-9jdkd\" (UID: \"0866f9dd-f54e-4ab4-86c7-9dc3a277a544\") " pod="openshift-marketplace/community-operators-9jdkd" Jan 31 17:13:09 crc kubenswrapper[4769]: I0131 17:13:09.327127 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0866f9dd-f54e-4ab4-86c7-9dc3a277a544-utilities\") pod \"community-operators-9jdkd\" (UID: \"0866f9dd-f54e-4ab4-86c7-9dc3a277a544\") " pod="openshift-marketplace/community-operators-9jdkd" Jan 31 17:13:09 crc kubenswrapper[4769]: I0131 17:13:09.327424 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wmdqh\" (UniqueName: \"kubernetes.io/projected/0866f9dd-f54e-4ab4-86c7-9dc3a277a544-kube-api-access-wmdqh\") pod \"community-operators-9jdkd\" (UID: \"0866f9dd-f54e-4ab4-86c7-9dc3a277a544\") " pod="openshift-marketplace/community-operators-9jdkd" Jan 31 17:13:09 crc kubenswrapper[4769]: I0131 17:13:09.429299 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0866f9dd-f54e-4ab4-86c7-9dc3a277a544-utilities\") pod \"community-operators-9jdkd\" (UID: \"0866f9dd-f54e-4ab4-86c7-9dc3a277a544\") " pod="openshift-marketplace/community-operators-9jdkd" Jan 31 17:13:09 crc kubenswrapper[4769]: I0131 17:13:09.429406 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wmdqh\" (UniqueName: \"kubernetes.io/projected/0866f9dd-f54e-4ab4-86c7-9dc3a277a544-kube-api-access-wmdqh\") pod \"community-operators-9jdkd\" (UID: \"0866f9dd-f54e-4ab4-86c7-9dc3a277a544\") " pod="openshift-marketplace/community-operators-9jdkd" Jan 31 17:13:09 crc kubenswrapper[4769]: I0131 17:13:09.429441 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0866f9dd-f54e-4ab4-86c7-9dc3a277a544-catalog-content\") pod \"community-operators-9jdkd\" (UID: \"0866f9dd-f54e-4ab4-86c7-9dc3a277a544\") " pod="openshift-marketplace/community-operators-9jdkd" Jan 31 17:13:09 crc kubenswrapper[4769]: I0131 17:13:09.429911 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0866f9dd-f54e-4ab4-86c7-9dc3a277a544-utilities\") pod \"community-operators-9jdkd\" (UID: \"0866f9dd-f54e-4ab4-86c7-9dc3a277a544\") " pod="openshift-marketplace/community-operators-9jdkd" Jan 31 17:13:09 crc kubenswrapper[4769]: I0131 17:13:09.429979 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0866f9dd-f54e-4ab4-86c7-9dc3a277a544-catalog-content\") pod \"community-operators-9jdkd\" (UID: \"0866f9dd-f54e-4ab4-86c7-9dc3a277a544\") " pod="openshift-marketplace/community-operators-9jdkd" Jan 31 17:13:09 crc kubenswrapper[4769]: I0131 17:13:09.450935 4769 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-wmdqh\" (UniqueName: \"kubernetes.io/projected/0866f9dd-f54e-4ab4-86c7-9dc3a277a544-kube-api-access-wmdqh\") pod \"community-operators-9jdkd\" (UID: \"0866f9dd-f54e-4ab4-86c7-9dc3a277a544\") " pod="openshift-marketplace/community-operators-9jdkd" Jan 31 17:13:09 crc kubenswrapper[4769]: I0131 17:13:09.574096 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-9jdkd" Jan 31 17:13:09 crc kubenswrapper[4769]: I0131 17:13:09.714753 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerStarted","Data":"6c0530e866cc91260b9953bf631bfbceeff9767fb372acc5471afcaeb5dee739"} Jan 31 17:13:09 crc kubenswrapper[4769]: I0131 17:13:09.715722 4769 scope.go:117] "RemoveContainer" containerID="8b255e9b67d1ed575196b0f86f7c7a9b343f002ed40dfb1d715798e0a6d44926" Jan 31 17:13:09 crc kubenswrapper[4769]: E0131 17:13:09.715947 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:13:09 crc kubenswrapper[4769]: I0131 17:13:09.716126 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 17:13:10 crc kubenswrapper[4769]: I0131 17:13:10.114395 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-9jdkd"] Jan 31 17:13:10 crc kubenswrapper[4769]: I0131 17:13:10.725129 4769 generic.go:334] "Generic (PLEG): container finished" podID="0866f9dd-f54e-4ab4-86c7-9dc3a277a544" containerID="d7b73d39731a853051c3eba47232411bf5f26e433b342c75e46bb7fe646e1fa4" exitCode=0 Jan 31 17:13:10 crc kubenswrapper[4769]: I0131 17:13:10.725190 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9jdkd" event={"ID":"0866f9dd-f54e-4ab4-86c7-9dc3a277a544","Type":"ContainerDied","Data":"d7b73d39731a853051c3eba47232411bf5f26e433b342c75e46bb7fe646e1fa4"} Jan 31 17:13:10 crc kubenswrapper[4769]: I0131 17:13:10.725215 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9jdkd" event={"ID":"0866f9dd-f54e-4ab4-86c7-9dc3a277a544","Type":"ContainerStarted","Data":"02b8c1b9d9481acf23f88c557d3bef2f2a2be647340899248d8267c52da6db5f"} Jan 31 17:13:10 crc kubenswrapper[4769]: I0131 17:13:10.727273 4769 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 31 17:13:10 crc kubenswrapper[4769]: I0131 17:13:10.729322 4769 generic.go:334] "Generic (PLEG): container finished" podID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerID="6c0530e866cc91260b9953bf631bfbceeff9767fb372acc5471afcaeb5dee739" exitCode=1 Jan 31 17:13:10 crc kubenswrapper[4769]: I0131 17:13:10.729371 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerDied","Data":"6c0530e866cc91260b9953bf631bfbceeff9767fb372acc5471afcaeb5dee739"} Jan 31 17:13:10 crc kubenswrapper[4769]: I0131 17:13:10.729414 4769 scope.go:117] "RemoveContainer" 
containerID="50b061844f8b9430593d59652fe6b1c117d7129dd574eb1fbaefa1b4c69e0e76" Jan 31 17:13:10 crc kubenswrapper[4769]: I0131 17:13:10.729904 4769 scope.go:117] "RemoveContainer" containerID="8b255e9b67d1ed575196b0f86f7c7a9b343f002ed40dfb1d715798e0a6d44926" Jan 31 17:13:10 crc kubenswrapper[4769]: I0131 17:13:10.729927 4769 scope.go:117] "RemoveContainer" containerID="6c0530e866cc91260b9953bf631bfbceeff9767fb372acc5471afcaeb5dee739" Jan 31 17:13:10 crc kubenswrapper[4769]: E0131 17:13:10.730233 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:13:11 crc kubenswrapper[4769]: I0131 17:13:11.645045 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 17:13:11 crc kubenswrapper[4769]: I0131 17:13:11.737751 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9jdkd" event={"ID":"0866f9dd-f54e-4ab4-86c7-9dc3a277a544","Type":"ContainerStarted","Data":"94a4a04186aa81ceff459f30c630bade1e553e1b13a559815dab61ce8a166d47"} Jan 31 17:13:11 crc kubenswrapper[4769]: I0131 17:13:11.739725 4769 scope.go:117] "RemoveContainer" containerID="8b255e9b67d1ed575196b0f86f7c7a9b343f002ed40dfb1d715798e0a6d44926" Jan 31 17:13:11 crc kubenswrapper[4769]: I0131 17:13:11.739746 4769 scope.go:117] "RemoveContainer" containerID="6c0530e866cc91260b9953bf631bfbceeff9767fb372acc5471afcaeb5dee739" Jan 31 17:13:11 crc kubenswrapper[4769]: E0131 17:13:11.739942 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:13:12 crc kubenswrapper[4769]: I0131 17:13:12.751156 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="993b7cfb583c2f3c03283a54e15111bfb34960d250a886a819b21427a34125aa" exitCode=1 Jan 31 17:13:12 crc kubenswrapper[4769]: I0131 17:13:12.751218 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"993b7cfb583c2f3c03283a54e15111bfb34960d250a886a819b21427a34125aa"} Jan 31 17:13:12 crc kubenswrapper[4769]: I0131 17:13:12.751555 4769 scope.go:117] "RemoveContainer" containerID="45c6b15f99b5833b0f2f7a5f1a332ca5114653356f6826aed2cbfdb4dc809c4d" Jan 31 17:13:12 crc kubenswrapper[4769]: I0131 17:13:12.752127 4769 scope.go:117] "RemoveContainer" 
containerID="faa0b58c5c0e2ceeb6c757f67f5915e2e34792cf9ac39d58f52e22e957346c5a" Jan 31 17:13:12 crc kubenswrapper[4769]: I0131 17:13:12.752179 4769 scope.go:117] "RemoveContainer" containerID="fcfb9e8a85fc333393776cbd0cc376b3645f3bd4e7e2144bcb19b8983c9b32a6" Jan 31 17:13:12 crc kubenswrapper[4769]: I0131 17:13:12.752249 4769 scope.go:117] "RemoveContainer" containerID="993b7cfb583c2f3c03283a54e15111bfb34960d250a886a819b21427a34125aa" Jan 31 17:13:12 crc kubenswrapper[4769]: I0131 17:13:12.752268 4769 scope.go:117] "RemoveContainer" containerID="0ff9e7c546d2aff57f41fb6c39504569a264d96e9c91dd4200b1d4df6621be8a" Jan 31 17:13:12 crc kubenswrapper[4769]: I0131 17:13:12.752299 4769 scope.go:117] "RemoveContainer" containerID="4fbd8754d6c10e3d907cb1535af0484fdd0e05947a6708d89bc9ac8a31367cd6" Jan 31 17:13:12 crc kubenswrapper[4769]: E0131 17:13:12.752666 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:13:12 crc kubenswrapper[4769]: I0131 17:13:12.754239 4769 generic.go:334] "Generic (PLEG): container finished" podID="0866f9dd-f54e-4ab4-86c7-9dc3a277a544" containerID="94a4a04186aa81ceff459f30c630bade1e553e1b13a559815dab61ce8a166d47" exitCode=0 Jan 31 17:13:12 crc kubenswrapper[4769]: I0131 17:13:12.754321 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9jdkd" event={"ID":"0866f9dd-f54e-4ab4-86c7-9dc3a277a544","Type":"ContainerDied","Data":"94a4a04186aa81ceff459f30c630bade1e553e1b13a559815dab61ce8a166d47"} Jan 31 17:13:12 crc kubenswrapper[4769]: I0131 17:13:12.754601 4769 scope.go:117] "RemoveContainer" containerID="8b255e9b67d1ed575196b0f86f7c7a9b343f002ed40dfb1d715798e0a6d44926" Jan 31 17:13:12 crc kubenswrapper[4769]: I0131 17:13:12.754615 4769 scope.go:117] "RemoveContainer" containerID="6c0530e866cc91260b9953bf631bfbceeff9767fb372acc5471afcaeb5dee739" Jan 31 17:13:12 crc kubenswrapper[4769]: E0131 17:13:12.754783 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server 
pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:13:13 crc kubenswrapper[4769]: I0131 17:13:13.764604 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9jdkd" event={"ID":"0866f9dd-f54e-4ab4-86c7-9dc3a277a544","Type":"ContainerStarted","Data":"cbc4015fe660b2574876c36358cc335e6bb64957d032aeb1ea21b4773676cebf"} Jan 31 17:13:13 crc kubenswrapper[4769]: I0131 17:13:13.789581 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-9jdkd" podStartSLOduration=2.365870387 podStartE2EDuration="4.789564835s" podCreationTimestamp="2026-01-31 17:13:09 +0000 UTC" firstStartedPulling="2026-01-31 17:13:10.727021645 +0000 UTC m=+2638.801190324" lastFinishedPulling="2026-01-31 17:13:13.150716103 +0000 UTC m=+2641.224884772" observedRunningTime="2026-01-31 17:13:13.783483701 +0000 UTC m=+2641.857652380" watchObservedRunningTime="2026-01-31 17:13:13.789564835 +0000 UTC m=+2641.863733514" Jan 31 17:13:17 crc kubenswrapper[4769]: I0131 17:13:17.272912 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-fkvmn"] Jan 31 17:13:17 crc kubenswrapper[4769]: I0131 17:13:17.304855 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fkvmn"] Jan 31 17:13:17 crc kubenswrapper[4769]: I0131 17:13:17.308592 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fkvmn" Jan 31 17:13:17 crc kubenswrapper[4769]: I0131 17:13:17.397002 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b7cd326c-9389-4ba7-a37a-469c6d79d421-utilities\") pod \"redhat-marketplace-fkvmn\" (UID: \"b7cd326c-9389-4ba7-a37a-469c6d79d421\") " pod="openshift-marketplace/redhat-marketplace-fkvmn" Jan 31 17:13:17 crc kubenswrapper[4769]: I0131 17:13:17.397124 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-br62l\" (UniqueName: \"kubernetes.io/projected/b7cd326c-9389-4ba7-a37a-469c6d79d421-kube-api-access-br62l\") pod \"redhat-marketplace-fkvmn\" (UID: \"b7cd326c-9389-4ba7-a37a-469c6d79d421\") " pod="openshift-marketplace/redhat-marketplace-fkvmn" Jan 31 17:13:17 crc kubenswrapper[4769]: I0131 17:13:17.397169 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b7cd326c-9389-4ba7-a37a-469c6d79d421-catalog-content\") pod \"redhat-marketplace-fkvmn\" (UID: \"b7cd326c-9389-4ba7-a37a-469c6d79d421\") " pod="openshift-marketplace/redhat-marketplace-fkvmn" Jan 31 17:13:17 crc kubenswrapper[4769]: I0131 17:13:17.498226 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-br62l\" (UniqueName: \"kubernetes.io/projected/b7cd326c-9389-4ba7-a37a-469c6d79d421-kube-api-access-br62l\") pod \"redhat-marketplace-fkvmn\" (UID: \"b7cd326c-9389-4ba7-a37a-469c6d79d421\") " pod="openshift-marketplace/redhat-marketplace-fkvmn" Jan 31 17:13:17 crc kubenswrapper[4769]: I0131 17:13:17.498273 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/b7cd326c-9389-4ba7-a37a-469c6d79d421-catalog-content\") pod \"redhat-marketplace-fkvmn\" (UID: \"b7cd326c-9389-4ba7-a37a-469c6d79d421\") " pod="openshift-marketplace/redhat-marketplace-fkvmn" Jan 31 17:13:17 crc kubenswrapper[4769]: I0131 17:13:17.498316 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b7cd326c-9389-4ba7-a37a-469c6d79d421-utilities\") pod \"redhat-marketplace-fkvmn\" (UID: \"b7cd326c-9389-4ba7-a37a-469c6d79d421\") " pod="openshift-marketplace/redhat-marketplace-fkvmn" Jan 31 17:13:17 crc kubenswrapper[4769]: I0131 17:13:17.498875 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b7cd326c-9389-4ba7-a37a-469c6d79d421-utilities\") pod \"redhat-marketplace-fkvmn\" (UID: \"b7cd326c-9389-4ba7-a37a-469c6d79d421\") " pod="openshift-marketplace/redhat-marketplace-fkvmn" Jan 31 17:13:17 crc kubenswrapper[4769]: I0131 17:13:17.498966 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b7cd326c-9389-4ba7-a37a-469c6d79d421-catalog-content\") pod \"redhat-marketplace-fkvmn\" (UID: \"b7cd326c-9389-4ba7-a37a-469c6d79d421\") " pod="openshift-marketplace/redhat-marketplace-fkvmn" Jan 31 17:13:17 crc kubenswrapper[4769]: I0131 17:13:17.518544 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-br62l\" (UniqueName: \"kubernetes.io/projected/b7cd326c-9389-4ba7-a37a-469c6d79d421-kube-api-access-br62l\") pod \"redhat-marketplace-fkvmn\" (UID: \"b7cd326c-9389-4ba7-a37a-469c6d79d421\") " pod="openshift-marketplace/redhat-marketplace-fkvmn" Jan 31 17:13:17 crc kubenswrapper[4769]: I0131 17:13:17.638373 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fkvmn" Jan 31 17:13:17 crc kubenswrapper[4769]: I0131 17:13:17.709436 4769 scope.go:117] "RemoveContainer" containerID="634222e06565f9a8960db793ad16c4794ee090ac4d43ace186f3daf20b881cd9" Jan 31 17:13:17 crc kubenswrapper[4769]: I0131 17:13:17.709796 4769 scope.go:117] "RemoveContainer" containerID="0dd9545c12ba7121dbfacc14fa99efe6c52da5daa3b0c89a71e5af63f8c99eda" Jan 31 17:13:17 crc kubenswrapper[4769]: I0131 17:13:17.709897 4769 scope.go:117] "RemoveContainer" containerID="24a94dae6f7672de022836fe383e9a1df16a07423b4ffed3c01db43a4d952d98" Jan 31 17:13:17 crc kubenswrapper[4769]: E0131 17:13:17.710165 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:13:18 crc kubenswrapper[4769]: I0131 17:13:18.137578 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fkvmn"] Jan 31 17:13:18 crc kubenswrapper[4769]: W0131 17:13:18.142050 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb7cd326c_9389_4ba7_a37a_469c6d79d421.slice/crio-6104217a3e6b29de800e0aa3fe1103bea5a7dd40782d42f6eb2f32564dfe51be WatchSource:0}: Error finding container 6104217a3e6b29de800e0aa3fe1103bea5a7dd40782d42f6eb2f32564dfe51be: Status 404 returned error can't find the container with id 6104217a3e6b29de800e0aa3fe1103bea5a7dd40782d42f6eb2f32564dfe51be Jan 31 17:13:18 crc kubenswrapper[4769]: I0131 17:13:18.815099 4769 generic.go:334] "Generic (PLEG): container finished" podID="b7cd326c-9389-4ba7-a37a-469c6d79d421" containerID="476951972a715a3a7274069c25423b30d1859935f51300cb2fafe73d7b78ca61" exitCode=0 Jan 31 17:13:18 crc kubenswrapper[4769]: I0131 17:13:18.815168 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fkvmn" event={"ID":"b7cd326c-9389-4ba7-a37a-469c6d79d421","Type":"ContainerDied","Data":"476951972a715a3a7274069c25423b30d1859935f51300cb2fafe73d7b78ca61"} Jan 31 17:13:18 crc kubenswrapper[4769]: I0131 17:13:18.815345 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fkvmn" event={"ID":"b7cd326c-9389-4ba7-a37a-469c6d79d421","Type":"ContainerStarted","Data":"6104217a3e6b29de800e0aa3fe1103bea5a7dd40782d42f6eb2f32564dfe51be"} Jan 31 17:13:19 crc kubenswrapper[4769]: I0131 17:13:19.575153 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-9jdkd" Jan 31 17:13:19 crc kubenswrapper[4769]: I0131 17:13:19.575202 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-9jdkd" Jan 31 17:13:19 crc kubenswrapper[4769]: I0131 17:13:19.633920 4769 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-9jdkd" Jan 31 17:13:19 crc kubenswrapper[4769]: I0131 17:13:19.708580 4769 scope.go:117] "RemoveContainer" containerID="014b73c8ef938632d42113db88d83d9c9af346c48d23501738b4fe440f72b44f" Jan 31 17:13:19 crc kubenswrapper[4769]: I0131 17:13:19.708648 4769 scope.go:117] "RemoveContainer" containerID="6cf22fe5662840a8c5cb4e5929a4fb93dc807d17766ba53f099e7b3a63f63ef8" Jan 31 17:13:19 crc kubenswrapper[4769]: I0131 17:13:19.708744 4769 scope.go:117] "RemoveContainer" containerID="02962f2b97310c8a4c458064c36a004fdb8bf734f79a6cefa9903589110e2751" Jan 31 17:13:19 crc kubenswrapper[4769]: E0131 17:13:19.709119 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:13:19 crc kubenswrapper[4769]: I0131 17:13:19.825114 4769 generic.go:334] "Generic (PLEG): container finished" podID="b7cd326c-9389-4ba7-a37a-469c6d79d421" containerID="66a031174b8360be967f40ef0821da6cc13c6e308ce5b6f220ebb333a948d30b" exitCode=0 Jan 31 17:13:19 crc kubenswrapper[4769]: I0131 17:13:19.825231 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fkvmn" event={"ID":"b7cd326c-9389-4ba7-a37a-469c6d79d421","Type":"ContainerDied","Data":"66a031174b8360be967f40ef0821da6cc13c6e308ce5b6f220ebb333a948d30b"} Jan 31 17:13:19 crc kubenswrapper[4769]: I0131 17:13:19.871063 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-9jdkd" Jan 31 17:13:20 crc kubenswrapper[4769]: I0131 17:13:20.838550 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fkvmn" event={"ID":"b7cd326c-9389-4ba7-a37a-469c6d79d421","Type":"ContainerStarted","Data":"7c21f7e1c37a0d4add0b8ac383d4c23655e7a4004bed427cd7f82761adf18e11"} Jan 31 17:13:20 crc kubenswrapper[4769]: I0131 17:13:20.864683 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-fkvmn" podStartSLOduration=2.478173372 podStartE2EDuration="3.864663729s" podCreationTimestamp="2026-01-31 17:13:17 +0000 UTC" firstStartedPulling="2026-01-31 17:13:18.816325955 +0000 UTC m=+2646.890494624" lastFinishedPulling="2026-01-31 17:13:20.202816302 +0000 UTC m=+2648.276984981" observedRunningTime="2026-01-31 17:13:20.863062717 +0000 UTC m=+2648.937231426" watchObservedRunningTime="2026-01-31 17:13:20.864663729 +0000 UTC m=+2648.938832408" Jan 31 17:13:21 crc kubenswrapper[4769]: I0131 17:13:21.990534 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-9jdkd"] Jan 31 17:13:21 crc kubenswrapper[4769]: I0131 17:13:21.990842 4769 kuberuntime_container.go:808] "Killing container with a 
grace period" pod="openshift-marketplace/community-operators-9jdkd" podUID="0866f9dd-f54e-4ab4-86c7-9dc3a277a544" containerName="registry-server" containerID="cri-o://cbc4015fe660b2574876c36358cc335e6bb64957d032aeb1ea21b4773676cebf" gracePeriod=2 Jan 31 17:13:22 crc kubenswrapper[4769]: I0131 17:13:22.857193 4769 generic.go:334] "Generic (PLEG): container finished" podID="0866f9dd-f54e-4ab4-86c7-9dc3a277a544" containerID="cbc4015fe660b2574876c36358cc335e6bb64957d032aeb1ea21b4773676cebf" exitCode=0 Jan 31 17:13:22 crc kubenswrapper[4769]: I0131 17:13:22.857295 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9jdkd" event={"ID":"0866f9dd-f54e-4ab4-86c7-9dc3a277a544","Type":"ContainerDied","Data":"cbc4015fe660b2574876c36358cc335e6bb64957d032aeb1ea21b4773676cebf"} Jan 31 17:13:22 crc kubenswrapper[4769]: I0131 17:13:22.947967 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-9jdkd" Jan 31 17:13:23 crc kubenswrapper[4769]: I0131 17:13:23.085009 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wmdqh\" (UniqueName: \"kubernetes.io/projected/0866f9dd-f54e-4ab4-86c7-9dc3a277a544-kube-api-access-wmdqh\") pod \"0866f9dd-f54e-4ab4-86c7-9dc3a277a544\" (UID: \"0866f9dd-f54e-4ab4-86c7-9dc3a277a544\") " Jan 31 17:13:23 crc kubenswrapper[4769]: I0131 17:13:23.085199 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0866f9dd-f54e-4ab4-86c7-9dc3a277a544-utilities\") pod \"0866f9dd-f54e-4ab4-86c7-9dc3a277a544\" (UID: \"0866f9dd-f54e-4ab4-86c7-9dc3a277a544\") " Jan 31 17:13:23 crc kubenswrapper[4769]: I0131 17:13:23.085254 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0866f9dd-f54e-4ab4-86c7-9dc3a277a544-catalog-content\") pod \"0866f9dd-f54e-4ab4-86c7-9dc3a277a544\" (UID: \"0866f9dd-f54e-4ab4-86c7-9dc3a277a544\") " Jan 31 17:13:23 crc kubenswrapper[4769]: I0131 17:13:23.086072 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0866f9dd-f54e-4ab4-86c7-9dc3a277a544-utilities" (OuterVolumeSpecName: "utilities") pod "0866f9dd-f54e-4ab4-86c7-9dc3a277a544" (UID: "0866f9dd-f54e-4ab4-86c7-9dc3a277a544"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 17:13:23 crc kubenswrapper[4769]: I0131 17:13:23.088916 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0866f9dd-f54e-4ab4-86c7-9dc3a277a544-utilities\") on node \"crc\" DevicePath \"\"" Jan 31 17:13:23 crc kubenswrapper[4769]: I0131 17:13:23.092629 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0866f9dd-f54e-4ab4-86c7-9dc3a277a544-kube-api-access-wmdqh" (OuterVolumeSpecName: "kube-api-access-wmdqh") pod "0866f9dd-f54e-4ab4-86c7-9dc3a277a544" (UID: "0866f9dd-f54e-4ab4-86c7-9dc3a277a544"). InnerVolumeSpecName "kube-api-access-wmdqh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 17:13:23 crc kubenswrapper[4769]: I0131 17:13:23.149550 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0866f9dd-f54e-4ab4-86c7-9dc3a277a544-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0866f9dd-f54e-4ab4-86c7-9dc3a277a544" (UID: "0866f9dd-f54e-4ab4-86c7-9dc3a277a544"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 17:13:23 crc kubenswrapper[4769]: I0131 17:13:23.190599 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0866f9dd-f54e-4ab4-86c7-9dc3a277a544-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 31 17:13:23 crc kubenswrapper[4769]: I0131 17:13:23.190654 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wmdqh\" (UniqueName: \"kubernetes.io/projected/0866f9dd-f54e-4ab4-86c7-9dc3a277a544-kube-api-access-wmdqh\") on node \"crc\" DevicePath \"\"" Jan 31 17:13:23 crc kubenswrapper[4769]: I0131 17:13:23.708429 4769 scope.go:117] "RemoveContainer" containerID="faa0b58c5c0e2ceeb6c757f67f5915e2e34792cf9ac39d58f52e22e957346c5a" Jan 31 17:13:23 crc kubenswrapper[4769]: I0131 17:13:23.708534 4769 scope.go:117] "RemoveContainer" containerID="fcfb9e8a85fc333393776cbd0cc376b3645f3bd4e7e2144bcb19b8983c9b32a6" Jan 31 17:13:23 crc kubenswrapper[4769]: I0131 17:13:23.708623 4769 scope.go:117] "RemoveContainer" containerID="993b7cfb583c2f3c03283a54e15111bfb34960d250a886a819b21427a34125aa" Jan 31 17:13:23 crc kubenswrapper[4769]: I0131 17:13:23.708633 4769 scope.go:117] "RemoveContainer" containerID="0ff9e7c546d2aff57f41fb6c39504569a264d96e9c91dd4200b1d4df6621be8a" Jan 31 17:13:23 crc kubenswrapper[4769]: I0131 17:13:23.708672 4769 scope.go:117] "RemoveContainer" containerID="4fbd8754d6c10e3d907cb1535af0484fdd0e05947a6708d89bc9ac8a31367cd6" Jan 31 17:13:23 crc kubenswrapper[4769]: I0131 17:13:23.867753 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9jdkd" event={"ID":"0866f9dd-f54e-4ab4-86c7-9dc3a277a544","Type":"ContainerDied","Data":"02b8c1b9d9481acf23f88c557d3bef2f2a2be647340899248d8267c52da6db5f"} Jan 31 17:13:23 crc kubenswrapper[4769]: I0131 17:13:23.867798 4769 scope.go:117] "RemoveContainer" containerID="cbc4015fe660b2574876c36358cc335e6bb64957d032aeb1ea21b4773676cebf" Jan 31 17:13:23 crc kubenswrapper[4769]: I0131 17:13:23.867820 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-9jdkd" Jan 31 17:13:23 crc kubenswrapper[4769]: I0131 17:13:23.905624 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-9jdkd"] Jan 31 17:13:23 crc kubenswrapper[4769]: I0131 17:13:23.908863 4769 scope.go:117] "RemoveContainer" containerID="94a4a04186aa81ceff459f30c630bade1e553e1b13a559815dab61ce8a166d47" Jan 31 17:13:23 crc kubenswrapper[4769]: I0131 17:13:23.914063 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-9jdkd"] Jan 31 17:13:23 crc kubenswrapper[4769]: I0131 17:13:23.936680 4769 scope.go:117] "RemoveContainer" containerID="d7b73d39731a853051c3eba47232411bf5f26e433b342c75e46bb7fe646e1fa4" Jan 31 17:13:24 crc kubenswrapper[4769]: E0131 17:13:24.547269 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:13:24 crc kubenswrapper[4769]: I0131 17:13:24.720371 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0866f9dd-f54e-4ab4-86c7-9dc3a277a544" path="/var/lib/kubelet/pods/0866f9dd-f54e-4ab4-86c7-9dc3a277a544/volumes" Jan 31 17:13:24 crc kubenswrapper[4769]: I0131 17:13:24.891899 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="9dbd9a0ad20346e4c64377f2a67c6289980b41414e80aaee45dc779649f3ac86" exitCode=1 Jan 31 17:13:24 crc kubenswrapper[4769]: I0131 17:13:24.891944 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="c0adeff93092159437f56d3a16d5a487149aa20a20f6d0f1c321bb6c899e34c2" exitCode=1 Jan 31 17:13:24 crc kubenswrapper[4769]: I0131 17:13:24.891955 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerStarted","Data":"9e0bc3e16231f9b53fb4ea1d60f9528601c7abd064da24494465904562243b24"} Jan 31 17:13:24 crc kubenswrapper[4769]: I0131 17:13:24.891993 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerStarted","Data":"b5a2131b280435817997df4440d40e8bfe82db2f050869abe88ef581ee288329"} Jan 31 17:13:24 crc kubenswrapper[4769]: I0131 17:13:24.892005 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"9dbd9a0ad20346e4c64377f2a67c6289980b41414e80aaee45dc779649f3ac86"} Jan 31 17:13:24 crc kubenswrapper[4769]: I0131 17:13:24.892025 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"c0adeff93092159437f56d3a16d5a487149aa20a20f6d0f1c321bb6c899e34c2"} Jan 31 17:13:24 crc kubenswrapper[4769]: I0131 17:13:24.892044 4769 scope.go:117] "RemoveContainer" containerID="fcfb9e8a85fc333393776cbd0cc376b3645f3bd4e7e2144bcb19b8983c9b32a6" Jan 31 17:13:24 crc kubenswrapper[4769]: I0131 17:13:24.892634 4769 scope.go:117] "RemoveContainer" 
containerID="c0adeff93092159437f56d3a16d5a487149aa20a20f6d0f1c321bb6c899e34c2" Jan 31 17:13:24 crc kubenswrapper[4769]: I0131 17:13:24.892713 4769 scope.go:117] "RemoveContainer" containerID="9dbd9a0ad20346e4c64377f2a67c6289980b41414e80aaee45dc779649f3ac86" Jan 31 17:13:24 crc kubenswrapper[4769]: I0131 17:13:24.892794 4769 scope.go:117] "RemoveContainer" containerID="993b7cfb583c2f3c03283a54e15111bfb34960d250a886a819b21427a34125aa" Jan 31 17:13:24 crc kubenswrapper[4769]: E0131 17:13:24.893068 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:13:24 crc kubenswrapper[4769]: I0131 17:13:24.962743 4769 scope.go:117] "RemoveContainer" containerID="faa0b58c5c0e2ceeb6c757f67f5915e2e34792cf9ac39d58f52e22e957346c5a" Jan 31 17:13:25 crc kubenswrapper[4769]: I0131 17:13:25.915654 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="9e0bc3e16231f9b53fb4ea1d60f9528601c7abd064da24494465904562243b24" exitCode=1 Jan 31 17:13:25 crc kubenswrapper[4769]: I0131 17:13:25.915700 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="b5a2131b280435817997df4440d40e8bfe82db2f050869abe88ef581ee288329" exitCode=1 Jan 31 17:13:25 crc kubenswrapper[4769]: I0131 17:13:25.915735 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"9e0bc3e16231f9b53fb4ea1d60f9528601c7abd064da24494465904562243b24"} Jan 31 17:13:25 crc kubenswrapper[4769]: I0131 17:13:25.915797 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"b5a2131b280435817997df4440d40e8bfe82db2f050869abe88ef581ee288329"} Jan 31 17:13:25 crc kubenswrapper[4769]: I0131 17:13:25.915827 4769 scope.go:117] "RemoveContainer" containerID="4fbd8754d6c10e3d907cb1535af0484fdd0e05947a6708d89bc9ac8a31367cd6" Jan 31 17:13:25 crc kubenswrapper[4769]: I0131 17:13:25.917979 4769 scope.go:117] "RemoveContainer" containerID="c0adeff93092159437f56d3a16d5a487149aa20a20f6d0f1c321bb6c899e34c2" Jan 31 17:13:25 crc kubenswrapper[4769]: I0131 17:13:25.918280 4769 scope.go:117] "RemoveContainer" containerID="9dbd9a0ad20346e4c64377f2a67c6289980b41414e80aaee45dc779649f3ac86" Jan 31 17:13:25 crc kubenswrapper[4769]: I0131 17:13:25.918659 4769 scope.go:117] "RemoveContainer" containerID="993b7cfb583c2f3c03283a54e15111bfb34960d250a886a819b21427a34125aa" Jan 31 17:13:25 crc kubenswrapper[4769]: I0131 17:13:25.918773 4769 scope.go:117] "RemoveContainer" containerID="b5a2131b280435817997df4440d40e8bfe82db2f050869abe88ef581ee288329" Jan 31 17:13:25 crc kubenswrapper[4769]: 
I0131 17:13:25.919083 4769 scope.go:117] "RemoveContainer" containerID="9e0bc3e16231f9b53fb4ea1d60f9528601c7abd064da24494465904562243b24" Jan 31 17:13:25 crc kubenswrapper[4769]: E0131 17:13:25.920304 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:13:25 crc kubenswrapper[4769]: I0131 17:13:25.989090 4769 scope.go:117] "RemoveContainer" containerID="0ff9e7c546d2aff57f41fb6c39504569a264d96e9c91dd4200b1d4df6621be8a" Jan 31 17:13:26 crc kubenswrapper[4769]: I0131 17:13:26.708969 4769 scope.go:117] "RemoveContainer" containerID="8b255e9b67d1ed575196b0f86f7c7a9b343f002ed40dfb1d715798e0a6d44926" Jan 31 17:13:26 crc kubenswrapper[4769]: I0131 17:13:26.709013 4769 scope.go:117] "RemoveContainer" containerID="6c0530e866cc91260b9953bf631bfbceeff9767fb372acc5471afcaeb5dee739" Jan 31 17:13:26 crc kubenswrapper[4769]: E0131 17:13:26.709533 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:13:26 crc kubenswrapper[4769]: I0131 17:13:26.933117 4769 scope.go:117] "RemoveContainer" containerID="c0adeff93092159437f56d3a16d5a487149aa20a20f6d0f1c321bb6c899e34c2" Jan 31 17:13:26 crc kubenswrapper[4769]: I0131 17:13:26.933488 4769 scope.go:117] "RemoveContainer" containerID="9dbd9a0ad20346e4c64377f2a67c6289980b41414e80aaee45dc779649f3ac86" Jan 31 17:13:26 crc kubenswrapper[4769]: I0131 17:13:26.933600 4769 scope.go:117] "RemoveContainer" containerID="993b7cfb583c2f3c03283a54e15111bfb34960d250a886a819b21427a34125aa" Jan 31 17:13:26 crc kubenswrapper[4769]: I0131 17:13:26.933610 4769 scope.go:117] "RemoveContainer" containerID="b5a2131b280435817997df4440d40e8bfe82db2f050869abe88ef581ee288329" Jan 31 17:13:26 crc kubenswrapper[4769]: I0131 17:13:26.933651 4769 scope.go:117] "RemoveContainer" containerID="9e0bc3e16231f9b53fb4ea1d60f9528601c7abd064da24494465904562243b24" Jan 31 
17:13:26 crc kubenswrapper[4769]: E0131 17:13:26.933979 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:13:27 crc kubenswrapper[4769]: I0131 17:13:27.639340 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-fkvmn" Jan 31 17:13:27 crc kubenswrapper[4769]: I0131 17:13:27.639389 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-fkvmn" Jan 31 17:13:27 crc kubenswrapper[4769]: I0131 17:13:27.681896 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-fkvmn" Jan 31 17:13:27 crc kubenswrapper[4769]: I0131 17:13:27.996208 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-fkvmn" Jan 31 17:13:28 crc kubenswrapper[4769]: I0131 17:13:28.044813 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fkvmn"] Jan 31 17:13:28 crc kubenswrapper[4769]: I0131 17:13:28.673825 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices\") pod \"swift-ring-rebalance-2sjs2\" (UID: \"54c0116b-a027-4f11-8b6b-aa00778f1acb\") " pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 17:13:28 crc kubenswrapper[4769]: E0131 17:13:28.673961 4769 configmap.go:193] Couldn't get configMap swift-kuttl-tests/swift-ring-config-data: configmap "swift-ring-config-data" not found Jan 31 17:13:28 crc kubenswrapper[4769]: E0131 17:13:28.674307 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices podName:54c0116b-a027-4f11-8b6b-aa00778f1acb nodeName:}" failed. No retries permitted until 2026-01-31 17:15:30.674287889 +0000 UTC m=+2778.748456558 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices") pod "swift-ring-rebalance-2sjs2" (UID: "54c0116b-a027-4f11-8b6b-aa00778f1acb") : configmap "swift-ring-config-data" not found Jan 31 17:13:29 crc kubenswrapper[4769]: I0131 17:13:29.963780 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-fkvmn" podUID="b7cd326c-9389-4ba7-a37a-469c6d79d421" containerName="registry-server" containerID="cri-o://7c21f7e1c37a0d4add0b8ac383d4c23655e7a4004bed427cd7f82761adf18e11" gracePeriod=2 Jan 31 17:13:30 crc kubenswrapper[4769]: I0131 17:13:30.462020 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fkvmn" Jan 31 17:13:30 crc kubenswrapper[4769]: I0131 17:13:30.502926 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-br62l\" (UniqueName: \"kubernetes.io/projected/b7cd326c-9389-4ba7-a37a-469c6d79d421-kube-api-access-br62l\") pod \"b7cd326c-9389-4ba7-a37a-469c6d79d421\" (UID: \"b7cd326c-9389-4ba7-a37a-469c6d79d421\") " Jan 31 17:13:30 crc kubenswrapper[4769]: I0131 17:13:30.503123 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b7cd326c-9389-4ba7-a37a-469c6d79d421-utilities\") pod \"b7cd326c-9389-4ba7-a37a-469c6d79d421\" (UID: \"b7cd326c-9389-4ba7-a37a-469c6d79d421\") " Jan 31 17:13:30 crc kubenswrapper[4769]: I0131 17:13:30.503184 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b7cd326c-9389-4ba7-a37a-469c6d79d421-catalog-content\") pod \"b7cd326c-9389-4ba7-a37a-469c6d79d421\" (UID: \"b7cd326c-9389-4ba7-a37a-469c6d79d421\") " Jan 31 17:13:30 crc kubenswrapper[4769]: I0131 17:13:30.504846 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b7cd326c-9389-4ba7-a37a-469c6d79d421-utilities" (OuterVolumeSpecName: "utilities") pod "b7cd326c-9389-4ba7-a37a-469c6d79d421" (UID: "b7cd326c-9389-4ba7-a37a-469c6d79d421"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 17:13:30 crc kubenswrapper[4769]: I0131 17:13:30.517986 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b7cd326c-9389-4ba7-a37a-469c6d79d421-kube-api-access-br62l" (OuterVolumeSpecName: "kube-api-access-br62l") pod "b7cd326c-9389-4ba7-a37a-469c6d79d421" (UID: "b7cd326c-9389-4ba7-a37a-469c6d79d421"). InnerVolumeSpecName "kube-api-access-br62l". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 17:13:30 crc kubenswrapper[4769]: I0131 17:13:30.541301 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b7cd326c-9389-4ba7-a37a-469c6d79d421-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b7cd326c-9389-4ba7-a37a-469c6d79d421" (UID: "b7cd326c-9389-4ba7-a37a-469c6d79d421"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 17:13:30 crc kubenswrapper[4769]: I0131 17:13:30.605063 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b7cd326c-9389-4ba7-a37a-469c6d79d421-utilities\") on node \"crc\" DevicePath \"\"" Jan 31 17:13:30 crc kubenswrapper[4769]: I0131 17:13:30.605164 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b7cd326c-9389-4ba7-a37a-469c6d79d421-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 31 17:13:30 crc kubenswrapper[4769]: I0131 17:13:30.605179 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-br62l\" (UniqueName: \"kubernetes.io/projected/b7cd326c-9389-4ba7-a37a-469c6d79d421-kube-api-access-br62l\") on node \"crc\" DevicePath \"\"" Jan 31 17:13:30 crc kubenswrapper[4769]: I0131 17:13:30.709353 4769 scope.go:117] "RemoveContainer" containerID="014b73c8ef938632d42113db88d83d9c9af346c48d23501738b4fe440f72b44f" Jan 31 17:13:30 crc kubenswrapper[4769]: I0131 17:13:30.709558 4769 scope.go:117] "RemoveContainer" containerID="6cf22fe5662840a8c5cb4e5929a4fb93dc807d17766ba53f099e7b3a63f63ef8" Jan 31 17:13:30 crc kubenswrapper[4769]: I0131 17:13:30.709736 4769 scope.go:117] "RemoveContainer" containerID="02962f2b97310c8a4c458064c36a004fdb8bf734f79a6cefa9903589110e2751" Jan 31 17:13:30 crc kubenswrapper[4769]: E0131 17:13:30.710197 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:13:30 crc kubenswrapper[4769]: I0131 17:13:30.975146 4769 generic.go:334] "Generic (PLEG): container finished" podID="b7cd326c-9389-4ba7-a37a-469c6d79d421" containerID="7c21f7e1c37a0d4add0b8ac383d4c23655e7a4004bed427cd7f82761adf18e11" exitCode=0 Jan 31 17:13:30 crc kubenswrapper[4769]: I0131 17:13:30.975193 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fkvmn" event={"ID":"b7cd326c-9389-4ba7-a37a-469c6d79d421","Type":"ContainerDied","Data":"7c21f7e1c37a0d4add0b8ac383d4c23655e7a4004bed427cd7f82761adf18e11"} Jan 31 17:13:30 crc kubenswrapper[4769]: I0131 17:13:30.975221 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fkvmn" event={"ID":"b7cd326c-9389-4ba7-a37a-469c6d79d421","Type":"ContainerDied","Data":"6104217a3e6b29de800e0aa3fe1103bea5a7dd40782d42f6eb2f32564dfe51be"} Jan 31 17:13:30 crc kubenswrapper[4769]: I0131 17:13:30.975240 4769 scope.go:117] "RemoveContainer" containerID="7c21f7e1c37a0d4add0b8ac383d4c23655e7a4004bed427cd7f82761adf18e11" Jan 31 17:13:30 crc kubenswrapper[4769]: I0131 17:13:30.975255 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fkvmn" Jan 31 17:13:31 crc kubenswrapper[4769]: I0131 17:13:31.007139 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fkvmn"] Jan 31 17:13:31 crc kubenswrapper[4769]: I0131 17:13:31.009210 4769 scope.go:117] "RemoveContainer" containerID="66a031174b8360be967f40ef0821da6cc13c6e308ce5b6f220ebb333a948d30b" Jan 31 17:13:31 crc kubenswrapper[4769]: I0131 17:13:31.014199 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-fkvmn"] Jan 31 17:13:31 crc kubenswrapper[4769]: I0131 17:13:31.047593 4769 scope.go:117] "RemoveContainer" containerID="476951972a715a3a7274069c25423b30d1859935f51300cb2fafe73d7b78ca61" Jan 31 17:13:31 crc kubenswrapper[4769]: I0131 17:13:31.084379 4769 scope.go:117] "RemoveContainer" containerID="7c21f7e1c37a0d4add0b8ac383d4c23655e7a4004bed427cd7f82761adf18e11" Jan 31 17:13:31 crc kubenswrapper[4769]: E0131 17:13:31.085012 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7c21f7e1c37a0d4add0b8ac383d4c23655e7a4004bed427cd7f82761adf18e11\": container with ID starting with 7c21f7e1c37a0d4add0b8ac383d4c23655e7a4004bed427cd7f82761adf18e11 not found: ID does not exist" containerID="7c21f7e1c37a0d4add0b8ac383d4c23655e7a4004bed427cd7f82761adf18e11" Jan 31 17:13:31 crc kubenswrapper[4769]: I0131 17:13:31.085062 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7c21f7e1c37a0d4add0b8ac383d4c23655e7a4004bed427cd7f82761adf18e11"} err="failed to get container status \"7c21f7e1c37a0d4add0b8ac383d4c23655e7a4004bed427cd7f82761adf18e11\": rpc error: code = NotFound desc = could not find container \"7c21f7e1c37a0d4add0b8ac383d4c23655e7a4004bed427cd7f82761adf18e11\": container with ID starting with 7c21f7e1c37a0d4add0b8ac383d4c23655e7a4004bed427cd7f82761adf18e11 not found: ID does not exist" Jan 31 17:13:31 crc kubenswrapper[4769]: I0131 17:13:31.085094 4769 scope.go:117] "RemoveContainer" containerID="66a031174b8360be967f40ef0821da6cc13c6e308ce5b6f220ebb333a948d30b" Jan 31 17:13:31 crc kubenswrapper[4769]: E0131 17:13:31.085945 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"66a031174b8360be967f40ef0821da6cc13c6e308ce5b6f220ebb333a948d30b\": container with ID starting with 66a031174b8360be967f40ef0821da6cc13c6e308ce5b6f220ebb333a948d30b not found: ID does not exist" containerID="66a031174b8360be967f40ef0821da6cc13c6e308ce5b6f220ebb333a948d30b" Jan 31 17:13:31 crc kubenswrapper[4769]: I0131 17:13:31.086006 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"66a031174b8360be967f40ef0821da6cc13c6e308ce5b6f220ebb333a948d30b"} err="failed to get container status \"66a031174b8360be967f40ef0821da6cc13c6e308ce5b6f220ebb333a948d30b\": rpc error: code = NotFound desc = could not find container \"66a031174b8360be967f40ef0821da6cc13c6e308ce5b6f220ebb333a948d30b\": container with ID starting with 66a031174b8360be967f40ef0821da6cc13c6e308ce5b6f220ebb333a948d30b not found: ID does not exist" Jan 31 17:13:31 crc kubenswrapper[4769]: I0131 17:13:31.086084 4769 scope.go:117] "RemoveContainer" containerID="476951972a715a3a7274069c25423b30d1859935f51300cb2fafe73d7b78ca61" Jan 31 17:13:31 crc kubenswrapper[4769]: E0131 17:13:31.086446 4769 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"476951972a715a3a7274069c25423b30d1859935f51300cb2fafe73d7b78ca61\": container with ID starting with 476951972a715a3a7274069c25423b30d1859935f51300cb2fafe73d7b78ca61 not found: ID does not exist" containerID="476951972a715a3a7274069c25423b30d1859935f51300cb2fafe73d7b78ca61" Jan 31 17:13:31 crc kubenswrapper[4769]: I0131 17:13:31.086484 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"476951972a715a3a7274069c25423b30d1859935f51300cb2fafe73d7b78ca61"} err="failed to get container status \"476951972a715a3a7274069c25423b30d1859935f51300cb2fafe73d7b78ca61\": rpc error: code = NotFound desc = could not find container \"476951972a715a3a7274069c25423b30d1859935f51300cb2fafe73d7b78ca61\": container with ID starting with 476951972a715a3a7274069c25423b30d1859935f51300cb2fafe73d7b78ca61 not found: ID does not exist" Jan 31 17:13:32 crc kubenswrapper[4769]: I0131 17:13:32.721023 4769 scope.go:117] "RemoveContainer" containerID="634222e06565f9a8960db793ad16c4794ee090ac4d43ace186f3daf20b881cd9" Jan 31 17:13:32 crc kubenswrapper[4769]: I0131 17:13:32.721042 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b7cd326c-9389-4ba7-a37a-469c6d79d421" path="/var/lib/kubelet/pods/b7cd326c-9389-4ba7-a37a-469c6d79d421/volumes" Jan 31 17:13:32 crc kubenswrapper[4769]: I0131 17:13:32.721477 4769 scope.go:117] "RemoveContainer" containerID="0dd9545c12ba7121dbfacc14fa99efe6c52da5daa3b0c89a71e5af63f8c99eda" Jan 31 17:13:32 crc kubenswrapper[4769]: I0131 17:13:32.721716 4769 scope.go:117] "RemoveContainer" containerID="24a94dae6f7672de022836fe383e9a1df16a07423b4ffed3c01db43a4d952d98" Jan 31 17:13:32 crc kubenswrapper[4769]: E0131 17:13:32.722278 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:13:37 crc kubenswrapper[4769]: I0131 17:13:37.048339 4769 generic.go:334] "Generic (PLEG): container finished" podID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" containerID="d6f9b1264de3eafed59f6d3cd753bb0fb6a800593dd28fa0e8b4d364827e1f30" exitCode=1 Jan 31 17:13:37 crc kubenswrapper[4769]: I0131 17:13:37.048557 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerDied","Data":"d6f9b1264de3eafed59f6d3cd753bb0fb6a800593dd28fa0e8b4d364827e1f30"} Jan 31 17:13:37 crc kubenswrapper[4769]: I0131 17:13:37.048812 4769 scope.go:117] "RemoveContainer" containerID="3db3e8f1c9b29ed09a5f982f48dcce6770bd77e6d4476386694741e2eedd6d51" Jan 31 17:13:37 crc kubenswrapper[4769]: I0131 17:13:37.050096 4769 scope.go:117] "RemoveContainer" containerID="014b73c8ef938632d42113db88d83d9c9af346c48d23501738b4fe440f72b44f" Jan 31 17:13:37 crc 
kubenswrapper[4769]: I0131 17:13:37.050221 4769 scope.go:117] "RemoveContainer" containerID="6cf22fe5662840a8c5cb4e5929a4fb93dc807d17766ba53f099e7b3a63f63ef8" Jan 31 17:13:37 crc kubenswrapper[4769]: I0131 17:13:37.050377 4769 scope.go:117] "RemoveContainer" containerID="d6f9b1264de3eafed59f6d3cd753bb0fb6a800593dd28fa0e8b4d364827e1f30" Jan 31 17:13:37 crc kubenswrapper[4769]: I0131 17:13:37.050450 4769 scope.go:117] "RemoveContainer" containerID="02962f2b97310c8a4c458064c36a004fdb8bf734f79a6cefa9903589110e2751" Jan 31 17:13:37 crc kubenswrapper[4769]: E0131 17:13:37.051047 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 10s restarting failed container=object-updater pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:13:38 crc kubenswrapper[4769]: E0131 17:13:38.679949 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ring-data-devices], unattached volumes=[], failed to process volumes=[]: context deadline exceeded" pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" podUID="54c0116b-a027-4f11-8b6b-aa00778f1acb" Jan 31 17:13:39 crc kubenswrapper[4769]: I0131 17:13:39.071132 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 17:13:40 crc kubenswrapper[4769]: I0131 17:13:40.708929 4769 scope.go:117] "RemoveContainer" containerID="8b255e9b67d1ed575196b0f86f7c7a9b343f002ed40dfb1d715798e0a6d44926" Jan 31 17:13:40 crc kubenswrapper[4769]: I0131 17:13:40.709068 4769 scope.go:117] "RemoveContainer" containerID="6c0530e866cc91260b9953bf631bfbceeff9767fb372acc5471afcaeb5dee739" Jan 31 17:13:40 crc kubenswrapper[4769]: E0131 17:13:40.709491 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:13:40 crc kubenswrapper[4769]: I0131 17:13:40.710565 4769 scope.go:117] "RemoveContainer" containerID="c0adeff93092159437f56d3a16d5a487149aa20a20f6d0f1c321bb6c899e34c2" Jan 31 17:13:40 crc kubenswrapper[4769]: I0131 17:13:40.710695 4769 scope.go:117] "RemoveContainer" containerID="9dbd9a0ad20346e4c64377f2a67c6289980b41414e80aaee45dc779649f3ac86" Jan 31 17:13:40 crc kubenswrapper[4769]: I0131 17:13:40.710846 4769 scope.go:117] "RemoveContainer" containerID="993b7cfb583c2f3c03283a54e15111bfb34960d250a886a819b21427a34125aa" Jan 31 17:13:40 crc kubenswrapper[4769]: I0131 17:13:40.710871 4769 scope.go:117] "RemoveContainer" containerID="b5a2131b280435817997df4440d40e8bfe82db2f050869abe88ef581ee288329" Jan 31 17:13:40 crc kubenswrapper[4769]: I0131 17:13:40.711075 4769 scope.go:117] "RemoveContainer" containerID="9e0bc3e16231f9b53fb4ea1d60f9528601c7abd064da24494465904562243b24" Jan 31 17:13:40 crc kubenswrapper[4769]: E0131 17:13:40.711704 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:13:47 crc kubenswrapper[4769]: I0131 17:13:47.709746 4769 scope.go:117] "RemoveContainer" containerID="634222e06565f9a8960db793ad16c4794ee090ac4d43ace186f3daf20b881cd9" Jan 31 17:13:47 crc kubenswrapper[4769]: I0131 17:13:47.710643 4769 
scope.go:117] "RemoveContainer" containerID="0dd9545c12ba7121dbfacc14fa99efe6c52da5daa3b0c89a71e5af63f8c99eda" Jan 31 17:13:47 crc kubenswrapper[4769]: I0131 17:13:47.710888 4769 scope.go:117] "RemoveContainer" containerID="24a94dae6f7672de022836fe383e9a1df16a07423b4ffed3c01db43a4d952d98" Jan 31 17:13:47 crc kubenswrapper[4769]: E0131 17:13:47.711556 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:13:50 crc kubenswrapper[4769]: I0131 17:13:50.188621 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="e02510ae3cb1cffb2ecbbc81af17dca00f092b778dff22277e474bd53c8f2cc7" exitCode=1 Jan 31 17:13:50 crc kubenswrapper[4769]: I0131 17:13:50.188723 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"e02510ae3cb1cffb2ecbbc81af17dca00f092b778dff22277e474bd53c8f2cc7"} Jan 31 17:13:50 crc kubenswrapper[4769]: I0131 17:13:50.189048 4769 scope.go:117] "RemoveContainer" containerID="87ddb4a86deaa8f5409259ffc6fa63ac90f4ca6f5dc5bedd2323d2e947621311" Jan 31 17:13:50 crc kubenswrapper[4769]: I0131 17:13:50.190106 4769 scope.go:117] "RemoveContainer" containerID="c0adeff93092159437f56d3a16d5a487149aa20a20f6d0f1c321bb6c899e34c2" Jan 31 17:13:50 crc kubenswrapper[4769]: I0131 17:13:50.190232 4769 scope.go:117] "RemoveContainer" containerID="9dbd9a0ad20346e4c64377f2a67c6289980b41414e80aaee45dc779649f3ac86" Jan 31 17:13:50 crc kubenswrapper[4769]: I0131 17:13:50.190282 4769 scope.go:117] "RemoveContainer" containerID="e02510ae3cb1cffb2ecbbc81af17dca00f092b778dff22277e474bd53c8f2cc7" Jan 31 17:13:50 crc kubenswrapper[4769]: I0131 17:13:50.190394 4769 scope.go:117] "RemoveContainer" containerID="993b7cfb583c2f3c03283a54e15111bfb34960d250a886a819b21427a34125aa" Jan 31 17:13:50 crc kubenswrapper[4769]: I0131 17:13:50.190415 4769 scope.go:117] "RemoveContainer" containerID="b5a2131b280435817997df4440d40e8bfe82db2f050869abe88ef581ee288329" Jan 31 17:13:50 crc kubenswrapper[4769]: I0131 17:13:50.190482 4769 scope.go:117] "RemoveContainer" containerID="9e0bc3e16231f9b53fb4ea1d60f9528601c7abd064da24494465904562243b24" Jan 31 17:13:50 crc kubenswrapper[4769]: E0131 17:13:50.191098 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed 
to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:13:51 crc kubenswrapper[4769]: I0131 17:13:51.709602 4769 scope.go:117] "RemoveContainer" containerID="014b73c8ef938632d42113db88d83d9c9af346c48d23501738b4fe440f72b44f" Jan 31 17:13:51 crc kubenswrapper[4769]: I0131 17:13:51.709739 4769 scope.go:117] "RemoveContainer" containerID="6cf22fe5662840a8c5cb4e5929a4fb93dc807d17766ba53f099e7b3a63f63ef8" Jan 31 17:13:51 crc kubenswrapper[4769]: I0131 17:13:51.709928 4769 scope.go:117] "RemoveContainer" containerID="d6f9b1264de3eafed59f6d3cd753bb0fb6a800593dd28fa0e8b4d364827e1f30" Jan 31 17:13:51 crc kubenswrapper[4769]: I0131 17:13:51.709943 4769 scope.go:117] "RemoveContainer" containerID="02962f2b97310c8a4c458064c36a004fdb8bf734f79a6cefa9903589110e2751" Jan 31 17:13:51 crc kubenswrapper[4769]: E0131 17:13:51.911400 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:13:52 crc kubenswrapper[4769]: I0131 17:13:52.258306 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerStarted","Data":"0e5e5a5d761db04efdc8a34375d7448ec177789e0842ab95390ff58a38850fa6"} Jan 31 17:13:52 crc kubenswrapper[4769]: I0131 17:13:52.259066 4769 scope.go:117] "RemoveContainer" containerID="014b73c8ef938632d42113db88d83d9c9af346c48d23501738b4fe440f72b44f" Jan 31 17:13:52 crc kubenswrapper[4769]: I0131 17:13:52.259154 4769 scope.go:117] "RemoveContainer" containerID="6cf22fe5662840a8c5cb4e5929a4fb93dc807d17766ba53f099e7b3a63f63ef8" Jan 31 17:13:52 crc kubenswrapper[4769]: I0131 17:13:52.259286 4769 scope.go:117] "RemoveContainer" containerID="02962f2b97310c8a4c458064c36a004fdb8bf734f79a6cefa9903589110e2751" Jan 31 17:13:52 crc kubenswrapper[4769]: E0131 17:13:52.259640 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for 
\"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:13:54 crc kubenswrapper[4769]: I0131 17:13:54.708252 4769 scope.go:117] "RemoveContainer" containerID="8b255e9b67d1ed575196b0f86f7c7a9b343f002ed40dfb1d715798e0a6d44926" Jan 31 17:13:54 crc kubenswrapper[4769]: I0131 17:13:54.709076 4769 scope.go:117] "RemoveContainer" containerID="6c0530e866cc91260b9953bf631bfbceeff9767fb372acc5471afcaeb5dee739" Jan 31 17:13:54 crc kubenswrapper[4769]: E0131 17:13:54.709464 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:14:01 crc kubenswrapper[4769]: I0131 17:14:01.709248 4769 scope.go:117] "RemoveContainer" containerID="634222e06565f9a8960db793ad16c4794ee090ac4d43ace186f3daf20b881cd9" Jan 31 17:14:01 crc kubenswrapper[4769]: I0131 17:14:01.709713 4769 scope.go:117] "RemoveContainer" containerID="0dd9545c12ba7121dbfacc14fa99efe6c52da5daa3b0c89a71e5af63f8c99eda" Jan 31 17:14:01 crc kubenswrapper[4769]: I0131 17:14:01.709891 4769 scope.go:117] "RemoveContainer" containerID="24a94dae6f7672de022836fe383e9a1df16a07423b4ffed3c01db43a4d952d98" Jan 31 17:14:01 crc kubenswrapper[4769]: E0131 17:14:01.710388 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:14:02 crc kubenswrapper[4769]: I0131 17:14:02.457654 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-6gcbq"] Jan 31 17:14:02 crc kubenswrapper[4769]: E0131 17:14:02.457923 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0866f9dd-f54e-4ab4-86c7-9dc3a277a544" containerName="registry-server" Jan 31 17:14:02 crc 
kubenswrapper[4769]: I0131 17:14:02.457935 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="0866f9dd-f54e-4ab4-86c7-9dc3a277a544" containerName="registry-server" Jan 31 17:14:02 crc kubenswrapper[4769]: E0131 17:14:02.457951 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7cd326c-9389-4ba7-a37a-469c6d79d421" containerName="extract-utilities" Jan 31 17:14:02 crc kubenswrapper[4769]: I0131 17:14:02.457956 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7cd326c-9389-4ba7-a37a-469c6d79d421" containerName="extract-utilities" Jan 31 17:14:02 crc kubenswrapper[4769]: E0131 17:14:02.457969 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7cd326c-9389-4ba7-a37a-469c6d79d421" containerName="registry-server" Jan 31 17:14:02 crc kubenswrapper[4769]: I0131 17:14:02.457975 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7cd326c-9389-4ba7-a37a-469c6d79d421" containerName="registry-server" Jan 31 17:14:02 crc kubenswrapper[4769]: E0131 17:14:02.457981 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0866f9dd-f54e-4ab4-86c7-9dc3a277a544" containerName="extract-utilities" Jan 31 17:14:02 crc kubenswrapper[4769]: I0131 17:14:02.457987 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="0866f9dd-f54e-4ab4-86c7-9dc3a277a544" containerName="extract-utilities" Jan 31 17:14:02 crc kubenswrapper[4769]: E0131 17:14:02.457995 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7cd326c-9389-4ba7-a37a-469c6d79d421" containerName="extract-content" Jan 31 17:14:02 crc kubenswrapper[4769]: I0131 17:14:02.458000 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7cd326c-9389-4ba7-a37a-469c6d79d421" containerName="extract-content" Jan 31 17:14:02 crc kubenswrapper[4769]: E0131 17:14:02.458025 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0866f9dd-f54e-4ab4-86c7-9dc3a277a544" containerName="extract-content" Jan 31 17:14:02 crc kubenswrapper[4769]: I0131 17:14:02.458030 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="0866f9dd-f54e-4ab4-86c7-9dc3a277a544" containerName="extract-content" Jan 31 17:14:02 crc kubenswrapper[4769]: I0131 17:14:02.458171 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="b7cd326c-9389-4ba7-a37a-469c6d79d421" containerName="registry-server" Jan 31 17:14:02 crc kubenswrapper[4769]: I0131 17:14:02.458196 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="0866f9dd-f54e-4ab4-86c7-9dc3a277a544" containerName="registry-server" Jan 31 17:14:02 crc kubenswrapper[4769]: I0131 17:14:02.459254 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-6gcbq" Jan 31 17:14:02 crc kubenswrapper[4769]: I0131 17:14:02.518126 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-6gcbq"] Jan 31 17:14:02 crc kubenswrapper[4769]: I0131 17:14:02.539727 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab8d24f9-99af-4ce5-a775-7cdfff94adfa-catalog-content\") pod \"certified-operators-6gcbq\" (UID: \"ab8d24f9-99af-4ce5-a775-7cdfff94adfa\") " pod="openshift-marketplace/certified-operators-6gcbq" Jan 31 17:14:02 crc kubenswrapper[4769]: I0131 17:14:02.539798 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab8d24f9-99af-4ce5-a775-7cdfff94adfa-utilities\") pod \"certified-operators-6gcbq\" (UID: \"ab8d24f9-99af-4ce5-a775-7cdfff94adfa\") " pod="openshift-marketplace/certified-operators-6gcbq" Jan 31 17:14:02 crc kubenswrapper[4769]: I0131 17:14:02.539904 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b7wmv\" (UniqueName: \"kubernetes.io/projected/ab8d24f9-99af-4ce5-a775-7cdfff94adfa-kube-api-access-b7wmv\") pod \"certified-operators-6gcbq\" (UID: \"ab8d24f9-99af-4ce5-a775-7cdfff94adfa\") " pod="openshift-marketplace/certified-operators-6gcbq" Jan 31 17:14:02 crc kubenswrapper[4769]: I0131 17:14:02.642105 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b7wmv\" (UniqueName: \"kubernetes.io/projected/ab8d24f9-99af-4ce5-a775-7cdfff94adfa-kube-api-access-b7wmv\") pod \"certified-operators-6gcbq\" (UID: \"ab8d24f9-99af-4ce5-a775-7cdfff94adfa\") " pod="openshift-marketplace/certified-operators-6gcbq" Jan 31 17:14:02 crc kubenswrapper[4769]: I0131 17:14:02.642267 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab8d24f9-99af-4ce5-a775-7cdfff94adfa-catalog-content\") pod \"certified-operators-6gcbq\" (UID: \"ab8d24f9-99af-4ce5-a775-7cdfff94adfa\") " pod="openshift-marketplace/certified-operators-6gcbq" Jan 31 17:14:02 crc kubenswrapper[4769]: I0131 17:14:02.642335 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab8d24f9-99af-4ce5-a775-7cdfff94adfa-utilities\") pod \"certified-operators-6gcbq\" (UID: \"ab8d24f9-99af-4ce5-a775-7cdfff94adfa\") " pod="openshift-marketplace/certified-operators-6gcbq" Jan 31 17:14:02 crc kubenswrapper[4769]: I0131 17:14:02.642837 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab8d24f9-99af-4ce5-a775-7cdfff94adfa-catalog-content\") pod \"certified-operators-6gcbq\" (UID: \"ab8d24f9-99af-4ce5-a775-7cdfff94adfa\") " pod="openshift-marketplace/certified-operators-6gcbq" Jan 31 17:14:02 crc kubenswrapper[4769]: I0131 17:14:02.643010 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab8d24f9-99af-4ce5-a775-7cdfff94adfa-utilities\") pod \"certified-operators-6gcbq\" (UID: \"ab8d24f9-99af-4ce5-a775-7cdfff94adfa\") " pod="openshift-marketplace/certified-operators-6gcbq" Jan 31 17:14:02 crc kubenswrapper[4769]: I0131 17:14:02.661418 4769 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-b7wmv\" (UniqueName: \"kubernetes.io/projected/ab8d24f9-99af-4ce5-a775-7cdfff94adfa-kube-api-access-b7wmv\") pod \"certified-operators-6gcbq\" (UID: \"ab8d24f9-99af-4ce5-a775-7cdfff94adfa\") " pod="openshift-marketplace/certified-operators-6gcbq" Jan 31 17:14:02 crc kubenswrapper[4769]: I0131 17:14:02.718523 4769 scope.go:117] "RemoveContainer" containerID="c0adeff93092159437f56d3a16d5a487149aa20a20f6d0f1c321bb6c899e34c2" Jan 31 17:14:02 crc kubenswrapper[4769]: I0131 17:14:02.718610 4769 scope.go:117] "RemoveContainer" containerID="9dbd9a0ad20346e4c64377f2a67c6289980b41414e80aaee45dc779649f3ac86" Jan 31 17:14:02 crc kubenswrapper[4769]: I0131 17:14:02.718641 4769 scope.go:117] "RemoveContainer" containerID="e02510ae3cb1cffb2ecbbc81af17dca00f092b778dff22277e474bd53c8f2cc7" Jan 31 17:14:02 crc kubenswrapper[4769]: I0131 17:14:02.718704 4769 scope.go:117] "RemoveContainer" containerID="993b7cfb583c2f3c03283a54e15111bfb34960d250a886a819b21427a34125aa" Jan 31 17:14:02 crc kubenswrapper[4769]: I0131 17:14:02.718712 4769 scope.go:117] "RemoveContainer" containerID="b5a2131b280435817997df4440d40e8bfe82db2f050869abe88ef581ee288329" Jan 31 17:14:02 crc kubenswrapper[4769]: I0131 17:14:02.718761 4769 scope.go:117] "RemoveContainer" containerID="9e0bc3e16231f9b53fb4ea1d60f9528601c7abd064da24494465904562243b24" Jan 31 17:14:02 crc kubenswrapper[4769]: E0131 17:14:02.722373 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:14:02 crc kubenswrapper[4769]: I0131 17:14:02.791629 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-6gcbq" Jan 31 17:14:03 crc kubenswrapper[4769]: I0131 17:14:03.293444 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-6gcbq"] Jan 31 17:14:03 crc kubenswrapper[4769]: I0131 17:14:03.353393 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6gcbq" event={"ID":"ab8d24f9-99af-4ce5-a775-7cdfff94adfa","Type":"ContainerStarted","Data":"3dd6b3b5fd9880313aace3e93d881e63f21ae8d47d1715038029e308d9aa335a"} Jan 31 17:14:04 crc kubenswrapper[4769]: I0131 17:14:04.362844 4769 generic.go:334] "Generic (PLEG): container finished" podID="ab8d24f9-99af-4ce5-a775-7cdfff94adfa" containerID="7f0b9a75cd5332b9aeb4021edbc77393e1b1e733595100d9565c590376fe3ded" exitCode=0 Jan 31 17:14:04 crc kubenswrapper[4769]: I0131 17:14:04.362894 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6gcbq" event={"ID":"ab8d24f9-99af-4ce5-a775-7cdfff94adfa","Type":"ContainerDied","Data":"7f0b9a75cd5332b9aeb4021edbc77393e1b1e733595100d9565c590376fe3ded"} Jan 31 17:14:04 crc kubenswrapper[4769]: I0131 17:14:04.708936 4769 scope.go:117] "RemoveContainer" containerID="014b73c8ef938632d42113db88d83d9c9af346c48d23501738b4fe440f72b44f" Jan 31 17:14:04 crc kubenswrapper[4769]: I0131 17:14:04.709344 4769 scope.go:117] "RemoveContainer" containerID="6cf22fe5662840a8c5cb4e5929a4fb93dc807d17766ba53f099e7b3a63f63ef8" Jan 31 17:14:04 crc kubenswrapper[4769]: I0131 17:14:04.709466 4769 scope.go:117] "RemoveContainer" containerID="02962f2b97310c8a4c458064c36a004fdb8bf734f79a6cefa9903589110e2751" Jan 31 17:14:04 crc kubenswrapper[4769]: E0131 17:14:04.709792 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:14:05 crc kubenswrapper[4769]: I0131 17:14:05.374102 4769 generic.go:334] "Generic (PLEG): container finished" podID="ab8d24f9-99af-4ce5-a775-7cdfff94adfa" containerID="02fbe93eeaa8600605d84652b3c989b7891cad8e05553fae28350e8705233a83" exitCode=0 Jan 31 17:14:05 crc kubenswrapper[4769]: I0131 17:14:05.374143 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6gcbq" event={"ID":"ab8d24f9-99af-4ce5-a775-7cdfff94adfa","Type":"ContainerDied","Data":"02fbe93eeaa8600605d84652b3c989b7891cad8e05553fae28350e8705233a83"} Jan 31 17:14:06 crc kubenswrapper[4769]: I0131 17:14:06.385631 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6gcbq" event={"ID":"ab8d24f9-99af-4ce5-a775-7cdfff94adfa","Type":"ContainerStarted","Data":"36614502485dd1cbe1488e5d5c0a6925b44a3432aec882b57cddd65174ffaefc"} Jan 31 17:14:06 crc kubenswrapper[4769]: I0131 17:14:06.415313 4769 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-6gcbq" podStartSLOduration=3.020960578 podStartE2EDuration="4.415292329s" podCreationTimestamp="2026-01-31 17:14:02 +0000 UTC" firstStartedPulling="2026-01-31 17:14:04.365289482 +0000 UTC m=+2692.439458151" lastFinishedPulling="2026-01-31 17:14:05.759621203 +0000 UTC m=+2693.833789902" observedRunningTime="2026-01-31 17:14:06.410583752 +0000 UTC m=+2694.484752431" watchObservedRunningTime="2026-01-31 17:14:06.415292329 +0000 UTC m=+2694.489461038" Jan 31 17:14:07 crc kubenswrapper[4769]: I0131 17:14:07.708698 4769 scope.go:117] "RemoveContainer" containerID="8b255e9b67d1ed575196b0f86f7c7a9b343f002ed40dfb1d715798e0a6d44926" Jan 31 17:14:07 crc kubenswrapper[4769]: I0131 17:14:07.709002 4769 scope.go:117] "RemoveContainer" containerID="6c0530e866cc91260b9953bf631bfbceeff9767fb372acc5471afcaeb5dee739" Jan 31 17:14:07 crc kubenswrapper[4769]: E0131 17:14:07.709337 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:14:12 crc kubenswrapper[4769]: I0131 17:14:12.792068 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-6gcbq" Jan 31 17:14:12 crc kubenswrapper[4769]: I0131 17:14:12.792535 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-6gcbq" Jan 31 17:14:12 crc kubenswrapper[4769]: I0131 17:14:12.855155 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-6gcbq" Jan 31 17:14:13 crc kubenswrapper[4769]: I0131 17:14:13.537875 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-6gcbq" Jan 31 17:14:13 crc kubenswrapper[4769]: I0131 17:14:13.604768 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-6gcbq"] Jan 31 17:14:14 crc kubenswrapper[4769]: I0131 17:14:14.710871 4769 scope.go:117] "RemoveContainer" containerID="634222e06565f9a8960db793ad16c4794ee090ac4d43ace186f3daf20b881cd9" Jan 31 17:14:14 crc kubenswrapper[4769]: I0131 17:14:14.711130 4769 scope.go:117] "RemoveContainer" containerID="0dd9545c12ba7121dbfacc14fa99efe6c52da5daa3b0c89a71e5af63f8c99eda" Jan 31 17:14:14 crc kubenswrapper[4769]: I0131 17:14:14.711640 4769 scope.go:117] "RemoveContainer" containerID="24a94dae6f7672de022836fe383e9a1df16a07423b4ffed3c01db43a4d952d98" Jan 31 17:14:14 crc kubenswrapper[4769]: E0131 17:14:14.726375 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator 
pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:14:15 crc kubenswrapper[4769]: I0131 17:14:15.471564 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-6gcbq" podUID="ab8d24f9-99af-4ce5-a775-7cdfff94adfa" containerName="registry-server" containerID="cri-o://36614502485dd1cbe1488e5d5c0a6925b44a3432aec882b57cddd65174ffaefc" gracePeriod=2 Jan 31 17:14:15 crc kubenswrapper[4769]: I0131 17:14:15.708794 4769 scope.go:117] "RemoveContainer" containerID="c0adeff93092159437f56d3a16d5a487149aa20a20f6d0f1c321bb6c899e34c2" Jan 31 17:14:15 crc kubenswrapper[4769]: I0131 17:14:15.708921 4769 scope.go:117] "RemoveContainer" containerID="9dbd9a0ad20346e4c64377f2a67c6289980b41414e80aaee45dc779649f3ac86" Jan 31 17:14:15 crc kubenswrapper[4769]: I0131 17:14:15.708967 4769 scope.go:117] "RemoveContainer" containerID="e02510ae3cb1cffb2ecbbc81af17dca00f092b778dff22277e474bd53c8f2cc7" Jan 31 17:14:15 crc kubenswrapper[4769]: I0131 17:14:15.709063 4769 scope.go:117] "RemoveContainer" containerID="993b7cfb583c2f3c03283a54e15111bfb34960d250a886a819b21427a34125aa" Jan 31 17:14:15 crc kubenswrapper[4769]: I0131 17:14:15.709078 4769 scope.go:117] "RemoveContainer" containerID="b5a2131b280435817997df4440d40e8bfe82db2f050869abe88ef581ee288329" Jan 31 17:14:15 crc kubenswrapper[4769]: I0131 17:14:15.709169 4769 scope.go:117] "RemoveContainer" containerID="9e0bc3e16231f9b53fb4ea1d60f9528601c7abd064da24494465904562243b24" Jan 31 17:14:15 crc kubenswrapper[4769]: E0131 17:14:15.709807 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:14:16 crc kubenswrapper[4769]: I0131 17:14:16.490308 4769 generic.go:334] "Generic (PLEG): container finished" podID="ab8d24f9-99af-4ce5-a775-7cdfff94adfa" 
containerID="36614502485dd1cbe1488e5d5c0a6925b44a3432aec882b57cddd65174ffaefc" exitCode=0 Jan 31 17:14:16 crc kubenswrapper[4769]: I0131 17:14:16.490817 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6gcbq" event={"ID":"ab8d24f9-99af-4ce5-a775-7cdfff94adfa","Type":"ContainerDied","Data":"36614502485dd1cbe1488e5d5c0a6925b44a3432aec882b57cddd65174ffaefc"} Jan 31 17:14:16 crc kubenswrapper[4769]: I0131 17:14:16.708748 4769 scope.go:117] "RemoveContainer" containerID="014b73c8ef938632d42113db88d83d9c9af346c48d23501738b4fe440f72b44f" Jan 31 17:14:16 crc kubenswrapper[4769]: I0131 17:14:16.708852 4769 scope.go:117] "RemoveContainer" containerID="6cf22fe5662840a8c5cb4e5929a4fb93dc807d17766ba53f099e7b3a63f63ef8" Jan 31 17:14:16 crc kubenswrapper[4769]: I0131 17:14:16.709024 4769 scope.go:117] "RemoveContainer" containerID="02962f2b97310c8a4c458064c36a004fdb8bf734f79a6cefa9903589110e2751" Jan 31 17:14:16 crc kubenswrapper[4769]: E0131 17:14:16.709586 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:14:16 crc kubenswrapper[4769]: I0131 17:14:16.735016 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6gcbq" Jan 31 17:14:16 crc kubenswrapper[4769]: I0131 17:14:16.882316 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab8d24f9-99af-4ce5-a775-7cdfff94adfa-catalog-content\") pod \"ab8d24f9-99af-4ce5-a775-7cdfff94adfa\" (UID: \"ab8d24f9-99af-4ce5-a775-7cdfff94adfa\") " Jan 31 17:14:16 crc kubenswrapper[4769]: I0131 17:14:16.882353 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b7wmv\" (UniqueName: \"kubernetes.io/projected/ab8d24f9-99af-4ce5-a775-7cdfff94adfa-kube-api-access-b7wmv\") pod \"ab8d24f9-99af-4ce5-a775-7cdfff94adfa\" (UID: \"ab8d24f9-99af-4ce5-a775-7cdfff94adfa\") " Jan 31 17:14:16 crc kubenswrapper[4769]: I0131 17:14:16.882410 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab8d24f9-99af-4ce5-a775-7cdfff94adfa-utilities\") pod \"ab8d24f9-99af-4ce5-a775-7cdfff94adfa\" (UID: \"ab8d24f9-99af-4ce5-a775-7cdfff94adfa\") " Jan 31 17:14:16 crc kubenswrapper[4769]: I0131 17:14:16.883693 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ab8d24f9-99af-4ce5-a775-7cdfff94adfa-utilities" (OuterVolumeSpecName: "utilities") pod "ab8d24f9-99af-4ce5-a775-7cdfff94adfa" (UID: "ab8d24f9-99af-4ce5-a775-7cdfff94adfa"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 17:14:16 crc kubenswrapper[4769]: I0131 17:14:16.891521 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ab8d24f9-99af-4ce5-a775-7cdfff94adfa-kube-api-access-b7wmv" (OuterVolumeSpecName: "kube-api-access-b7wmv") pod "ab8d24f9-99af-4ce5-a775-7cdfff94adfa" (UID: "ab8d24f9-99af-4ce5-a775-7cdfff94adfa"). InnerVolumeSpecName "kube-api-access-b7wmv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 17:14:16 crc kubenswrapper[4769]: I0131 17:14:16.934376 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ab8d24f9-99af-4ce5-a775-7cdfff94adfa-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ab8d24f9-99af-4ce5-a775-7cdfff94adfa" (UID: "ab8d24f9-99af-4ce5-a775-7cdfff94adfa"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 17:14:16 crc kubenswrapper[4769]: I0131 17:14:16.983614 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab8d24f9-99af-4ce5-a775-7cdfff94adfa-utilities\") on node \"crc\" DevicePath \"\"" Jan 31 17:14:16 crc kubenswrapper[4769]: I0131 17:14:16.983647 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b7wmv\" (UniqueName: \"kubernetes.io/projected/ab8d24f9-99af-4ce5-a775-7cdfff94adfa-kube-api-access-b7wmv\") on node \"crc\" DevicePath \"\"" Jan 31 17:14:16 crc kubenswrapper[4769]: I0131 17:14:16.983658 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab8d24f9-99af-4ce5-a775-7cdfff94adfa-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 31 17:14:17 crc kubenswrapper[4769]: I0131 17:14:17.508043 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6gcbq" event={"ID":"ab8d24f9-99af-4ce5-a775-7cdfff94adfa","Type":"ContainerDied","Data":"3dd6b3b5fd9880313aace3e93d881e63f21ae8d47d1715038029e308d9aa335a"} Jan 31 17:14:17 crc kubenswrapper[4769]: I0131 17:14:17.508117 4769 scope.go:117] "RemoveContainer" containerID="36614502485dd1cbe1488e5d5c0a6925b44a3432aec882b57cddd65174ffaefc" Jan 31 17:14:17 crc kubenswrapper[4769]: I0131 17:14:17.508132 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-6gcbq" Jan 31 17:14:17 crc kubenswrapper[4769]: I0131 17:14:17.547717 4769 scope.go:117] "RemoveContainer" containerID="02fbe93eeaa8600605d84652b3c989b7891cad8e05553fae28350e8705233a83" Jan 31 17:14:17 crc kubenswrapper[4769]: I0131 17:14:17.560644 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-6gcbq"] Jan 31 17:14:17 crc kubenswrapper[4769]: I0131 17:14:17.568292 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-6gcbq"] Jan 31 17:14:17 crc kubenswrapper[4769]: I0131 17:14:17.584523 4769 scope.go:117] "RemoveContainer" containerID="7f0b9a75cd5332b9aeb4021edbc77393e1b1e733595100d9565c590376fe3ded" Jan 31 17:14:18 crc kubenswrapper[4769]: I0131 17:14:18.738522 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ab8d24f9-99af-4ce5-a775-7cdfff94adfa" path="/var/lib/kubelet/pods/ab8d24f9-99af-4ce5-a775-7cdfff94adfa/volumes" Jan 31 17:14:20 crc kubenswrapper[4769]: I0131 17:14:20.708885 4769 scope.go:117] "RemoveContainer" containerID="8b255e9b67d1ed575196b0f86f7c7a9b343f002ed40dfb1d715798e0a6d44926" Jan 31 17:14:20 crc kubenswrapper[4769]: I0131 17:14:20.708947 4769 scope.go:117] "RemoveContainer" containerID="6c0530e866cc91260b9953bf631bfbceeff9767fb372acc5471afcaeb5dee739" Jan 31 17:14:20 crc kubenswrapper[4769]: E0131 17:14:20.709387 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:14:26 crc kubenswrapper[4769]: I0131 17:14:26.709904 4769 scope.go:117] "RemoveContainer" containerID="634222e06565f9a8960db793ad16c4794ee090ac4d43ace186f3daf20b881cd9" Jan 31 17:14:26 crc kubenswrapper[4769]: I0131 17:14:26.710563 4769 scope.go:117] "RemoveContainer" containerID="0dd9545c12ba7121dbfacc14fa99efe6c52da5daa3b0c89a71e5af63f8c99eda" Jan 31 17:14:26 crc kubenswrapper[4769]: I0131 17:14:26.710744 4769 scope.go:117] "RemoveContainer" containerID="24a94dae6f7672de022836fe383e9a1df16a07423b4ffed3c01db43a4d952d98" Jan 31 17:14:26 crc kubenswrapper[4769]: E0131 17:14:26.711176 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:14:29 crc kubenswrapper[4769]: I0131 17:14:29.708745 4769 scope.go:117] 
"RemoveContainer" containerID="014b73c8ef938632d42113db88d83d9c9af346c48d23501738b4fe440f72b44f" Jan 31 17:14:29 crc kubenswrapper[4769]: I0131 17:14:29.709076 4769 scope.go:117] "RemoveContainer" containerID="6cf22fe5662840a8c5cb4e5929a4fb93dc807d17766ba53f099e7b3a63f63ef8" Jan 31 17:14:29 crc kubenswrapper[4769]: I0131 17:14:29.709187 4769 scope.go:117] "RemoveContainer" containerID="c0adeff93092159437f56d3a16d5a487149aa20a20f6d0f1c321bb6c899e34c2" Jan 31 17:14:29 crc kubenswrapper[4769]: I0131 17:14:29.709323 4769 scope.go:117] "RemoveContainer" containerID="02962f2b97310c8a4c458064c36a004fdb8bf734f79a6cefa9903589110e2751" Jan 31 17:14:29 crc kubenswrapper[4769]: I0131 17:14:29.709344 4769 scope.go:117] "RemoveContainer" containerID="9dbd9a0ad20346e4c64377f2a67c6289980b41414e80aaee45dc779649f3ac86" Jan 31 17:14:29 crc kubenswrapper[4769]: I0131 17:14:29.709407 4769 scope.go:117] "RemoveContainer" containerID="e02510ae3cb1cffb2ecbbc81af17dca00f092b778dff22277e474bd53c8f2cc7" Jan 31 17:14:29 crc kubenswrapper[4769]: I0131 17:14:29.709573 4769 scope.go:117] "RemoveContainer" containerID="993b7cfb583c2f3c03283a54e15111bfb34960d250a886a819b21427a34125aa" Jan 31 17:14:29 crc kubenswrapper[4769]: I0131 17:14:29.709598 4769 scope.go:117] "RemoveContainer" containerID="b5a2131b280435817997df4440d40e8bfe82db2f050869abe88ef581ee288329" Jan 31 17:14:29 crc kubenswrapper[4769]: E0131 17:14:29.709651 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:14:29 crc kubenswrapper[4769]: I0131 17:14:29.709691 4769 scope.go:117] "RemoveContainer" containerID="9e0bc3e16231f9b53fb4ea1d60f9528601c7abd064da24494465904562243b24" Jan 31 17:14:29 crc kubenswrapper[4769]: E0131 17:14:29.710414 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=object-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer 
pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:14:35 crc kubenswrapper[4769]: I0131 17:14:35.708246 4769 scope.go:117] "RemoveContainer" containerID="8b255e9b67d1ed575196b0f86f7c7a9b343f002ed40dfb1d715798e0a6d44926" Jan 31 17:14:35 crc kubenswrapper[4769]: I0131 17:14:35.708703 4769 scope.go:117] "RemoveContainer" containerID="6c0530e866cc91260b9953bf631bfbceeff9767fb372acc5471afcaeb5dee739" Jan 31 17:14:35 crc kubenswrapper[4769]: E0131 17:14:35.708920 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:14:40 crc kubenswrapper[4769]: I0131 17:14:40.709001 4769 scope.go:117] "RemoveContainer" containerID="634222e06565f9a8960db793ad16c4794ee090ac4d43ace186f3daf20b881cd9" Jan 31 17:14:40 crc kubenswrapper[4769]: I0131 17:14:40.709799 4769 scope.go:117] "RemoveContainer" containerID="0dd9545c12ba7121dbfacc14fa99efe6c52da5daa3b0c89a71e5af63f8c99eda" Jan 31 17:14:40 crc kubenswrapper[4769]: I0131 17:14:40.709952 4769 scope.go:117] "RemoveContainer" containerID="24a94dae6f7672de022836fe383e9a1df16a07423b4ffed3c01db43a4d952d98" Jan 31 17:14:40 crc kubenswrapper[4769]: E0131 17:14:40.710336 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:14:41 crc kubenswrapper[4769]: I0131 17:14:41.708567 4769 scope.go:117] "RemoveContainer" containerID="c0adeff93092159437f56d3a16d5a487149aa20a20f6d0f1c321bb6c899e34c2" Jan 31 17:14:41 crc kubenswrapper[4769]: I0131 17:14:41.708965 4769 scope.go:117] "RemoveContainer" containerID="9dbd9a0ad20346e4c64377f2a67c6289980b41414e80aaee45dc779649f3ac86" Jan 31 17:14:41 crc kubenswrapper[4769]: I0131 17:14:41.708993 4769 scope.go:117] "RemoveContainer" containerID="e02510ae3cb1cffb2ecbbc81af17dca00f092b778dff22277e474bd53c8f2cc7" Jan 31 17:14:41 crc kubenswrapper[4769]: I0131 17:14:41.709049 4769 scope.go:117] "RemoveContainer" containerID="993b7cfb583c2f3c03283a54e15111bfb34960d250a886a819b21427a34125aa" Jan 31 17:14:41 crc 
kubenswrapper[4769]: I0131 17:14:41.709058 4769 scope.go:117] "RemoveContainer" containerID="b5a2131b280435817997df4440d40e8bfe82db2f050869abe88ef581ee288329" Jan 31 17:14:41 crc kubenswrapper[4769]: I0131 17:14:41.709099 4769 scope.go:117] "RemoveContainer" containerID="9e0bc3e16231f9b53fb4ea1d60f9528601c7abd064da24494465904562243b24" Jan 31 17:14:41 crc kubenswrapper[4769]: E0131 17:14:41.857902 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:14:42 crc kubenswrapper[4769]: I0131 17:14:42.737701 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerStarted","Data":"c62e2433e6dd447fa3a7ea820441d82ce4079d9f21bdd74fac77729e6d1a9e47"} Jan 31 17:14:42 crc kubenswrapper[4769]: I0131 17:14:42.738640 4769 scope.go:117] "RemoveContainer" containerID="c0adeff93092159437f56d3a16d5a487149aa20a20f6d0f1c321bb6c899e34c2" Jan 31 17:14:42 crc kubenswrapper[4769]: I0131 17:14:42.738727 4769 scope.go:117] "RemoveContainer" containerID="9dbd9a0ad20346e4c64377f2a67c6289980b41414e80aaee45dc779649f3ac86" Jan 31 17:14:42 crc kubenswrapper[4769]: I0131 17:14:42.738762 4769 scope.go:117] "RemoveContainer" containerID="e02510ae3cb1cffb2ecbbc81af17dca00f092b778dff22277e474bd53c8f2cc7" Jan 31 17:14:42 crc kubenswrapper[4769]: I0131 17:14:42.738836 4769 scope.go:117] "RemoveContainer" containerID="b5a2131b280435817997df4440d40e8bfe82db2f050869abe88ef581ee288329" Jan 31 17:14:42 crc kubenswrapper[4769]: I0131 17:14:42.738873 4769 scope.go:117] "RemoveContainer" containerID="9e0bc3e16231f9b53fb4ea1d60f9528601c7abd064da24494465904562243b24" Jan 31 17:14:42 crc kubenswrapper[4769]: E0131 17:14:42.739138 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 1m20s restarting failed 
container=container-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:14:43 crc kubenswrapper[4769]: I0131 17:14:43.708269 4769 scope.go:117] "RemoveContainer" containerID="014b73c8ef938632d42113db88d83d9c9af346c48d23501738b4fe440f72b44f" Jan 31 17:14:43 crc kubenswrapper[4769]: I0131 17:14:43.708357 4769 scope.go:117] "RemoveContainer" containerID="6cf22fe5662840a8c5cb4e5929a4fb93dc807d17766ba53f099e7b3a63f63ef8" Jan 31 17:14:43 crc kubenswrapper[4769]: I0131 17:14:43.708482 4769 scope.go:117] "RemoveContainer" containerID="02962f2b97310c8a4c458064c36a004fdb8bf734f79a6cefa9903589110e2751" Jan 31 17:14:43 crc kubenswrapper[4769]: E0131 17:14:43.708888 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:14:44 crc kubenswrapper[4769]: I0131 17:14:44.757485 4769 generic.go:334] "Generic (PLEG): container finished" podID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" containerID="b05cfd3993f9609da7c0b1896e627e3b448ea96c8859fbfbb4e6331cd4667933" exitCode=1 Jan 31 17:14:44 crc kubenswrapper[4769]: I0131 17:14:44.757582 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerDied","Data":"b05cfd3993f9609da7c0b1896e627e3b448ea96c8859fbfbb4e6331cd4667933"} Jan 31 17:14:44 crc kubenswrapper[4769]: I0131 17:14:44.757655 4769 scope.go:117] "RemoveContainer" containerID="77d09e7c96f692e0f5b97f9b844245c724259d53f5195c1791ff6dccd16b7ace" Jan 31 17:14:44 crc kubenswrapper[4769]: I0131 17:14:44.758414 4769 scope.go:117] "RemoveContainer" containerID="014b73c8ef938632d42113db88d83d9c9af346c48d23501738b4fe440f72b44f" Jan 31 17:14:44 crc kubenswrapper[4769]: I0131 17:14:44.758521 4769 scope.go:117] "RemoveContainer" containerID="6cf22fe5662840a8c5cb4e5929a4fb93dc807d17766ba53f099e7b3a63f63ef8" Jan 31 17:14:44 crc kubenswrapper[4769]: I0131 17:14:44.758557 4769 scope.go:117] "RemoveContainer" containerID="b05cfd3993f9609da7c0b1896e627e3b448ea96c8859fbfbb4e6331cd4667933" Jan 31 17:14:44 crc kubenswrapper[4769]: I0131 17:14:44.758661 4769 scope.go:117] "RemoveContainer" containerID="02962f2b97310c8a4c458064c36a004fdb8bf734f79a6cefa9903589110e2751" Jan 31 17:14:44 crc kubenswrapper[4769]: E0131 17:14:44.759185 
4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 20s restarting failed container=container-updater pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:14:49 crc kubenswrapper[4769]: I0131 17:14:49.707946 4769 scope.go:117] "RemoveContainer" containerID="8b255e9b67d1ed575196b0f86f7c7a9b343f002ed40dfb1d715798e0a6d44926" Jan 31 17:14:49 crc kubenswrapper[4769]: I0131 17:14:49.708475 4769 scope.go:117] "RemoveContainer" containerID="6c0530e866cc91260b9953bf631bfbceeff9767fb372acc5471afcaeb5dee739" Jan 31 17:14:49 crc kubenswrapper[4769]: E0131 17:14:49.708755 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:14:50 crc kubenswrapper[4769]: I0131 17:14:50.682603 4769 patch_prober.go:28] interesting pod/machine-config-daemon-4bqbm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 31 17:14:50 crc kubenswrapper[4769]: I0131 17:14:50.683724 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 31 17:14:53 crc kubenswrapper[4769]: I0131 17:14:53.708682 4769 scope.go:117] "RemoveContainer" containerID="634222e06565f9a8960db793ad16c4794ee090ac4d43ace186f3daf20b881cd9" Jan 31 17:14:53 crc kubenswrapper[4769]: I0131 17:14:53.708776 4769 scope.go:117] "RemoveContainer" containerID="0dd9545c12ba7121dbfacc14fa99efe6c52da5daa3b0c89a71e5af63f8c99eda" Jan 31 17:14:53 crc kubenswrapper[4769]: I0131 17:14:53.708810 4769 scope.go:117] "RemoveContainer" containerID="c0adeff93092159437f56d3a16d5a487149aa20a20f6d0f1c321bb6c899e34c2" Jan 31 17:14:53 crc kubenswrapper[4769]: I0131 17:14:53.708884 4769 scope.go:117] "RemoveContainer" containerID="9dbd9a0ad20346e4c64377f2a67c6289980b41414e80aaee45dc779649f3ac86" Jan 31 17:14:53 crc kubenswrapper[4769]: 
I0131 17:14:53.708901 4769 scope.go:117] "RemoveContainer" containerID="24a94dae6f7672de022836fe383e9a1df16a07423b4ffed3c01db43a4d952d98" Jan 31 17:14:53 crc kubenswrapper[4769]: I0131 17:14:53.708912 4769 scope.go:117] "RemoveContainer" containerID="e02510ae3cb1cffb2ecbbc81af17dca00f092b778dff22277e474bd53c8f2cc7" Jan 31 17:14:53 crc kubenswrapper[4769]: I0131 17:14:53.708986 4769 scope.go:117] "RemoveContainer" containerID="b5a2131b280435817997df4440d40e8bfe82db2f050869abe88ef581ee288329" Jan 31 17:14:53 crc kubenswrapper[4769]: I0131 17:14:53.709031 4769 scope.go:117] "RemoveContainer" containerID="9e0bc3e16231f9b53fb4ea1d60f9528601c7abd064da24494465904562243b24" Jan 31 17:14:53 crc kubenswrapper[4769]: E0131 17:14:53.709213 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:14:53 crc kubenswrapper[4769]: E0131 17:14:53.709333 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:14:58 crc kubenswrapper[4769]: I0131 17:14:58.709035 4769 scope.go:117] "RemoveContainer" containerID="014b73c8ef938632d42113db88d83d9c9af346c48d23501738b4fe440f72b44f" Jan 31 17:14:58 crc kubenswrapper[4769]: I0131 17:14:58.709841 4769 scope.go:117] "RemoveContainer" containerID="6cf22fe5662840a8c5cb4e5929a4fb93dc807d17766ba53f099e7b3a63f63ef8" Jan 31 17:14:58 crc kubenswrapper[4769]: I0131 17:14:58.709888 4769 scope.go:117] "RemoveContainer" containerID="b05cfd3993f9609da7c0b1896e627e3b448ea96c8859fbfbb4e6331cd4667933" Jan 31 17:14:58 crc kubenswrapper[4769]: I0131 17:14:58.710007 4769 scope.go:117] "RemoveContainer" 
containerID="02962f2b97310c8a4c458064c36a004fdb8bf734f79a6cefa9903589110e2751" Jan 31 17:14:58 crc kubenswrapper[4769]: E0131 17:14:58.710737 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 20s restarting failed container=container-updater pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:15:00 crc kubenswrapper[4769]: I0131 17:15:00.153579 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29497995-x4npl"] Jan 31 17:15:00 crc kubenswrapper[4769]: E0131 17:15:00.154354 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab8d24f9-99af-4ce5-a775-7cdfff94adfa" containerName="registry-server" Jan 31 17:15:00 crc kubenswrapper[4769]: I0131 17:15:00.154370 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab8d24f9-99af-4ce5-a775-7cdfff94adfa" containerName="registry-server" Jan 31 17:15:00 crc kubenswrapper[4769]: E0131 17:15:00.154395 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab8d24f9-99af-4ce5-a775-7cdfff94adfa" containerName="extract-content" Jan 31 17:15:00 crc kubenswrapper[4769]: I0131 17:15:00.154403 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab8d24f9-99af-4ce5-a775-7cdfff94adfa" containerName="extract-content" Jan 31 17:15:00 crc kubenswrapper[4769]: E0131 17:15:00.154422 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab8d24f9-99af-4ce5-a775-7cdfff94adfa" containerName="extract-utilities" Jan 31 17:15:00 crc kubenswrapper[4769]: I0131 17:15:00.154432 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab8d24f9-99af-4ce5-a775-7cdfff94adfa" containerName="extract-utilities" Jan 31 17:15:00 crc kubenswrapper[4769]: I0131 17:15:00.154629 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="ab8d24f9-99af-4ce5-a775-7cdfff94adfa" containerName="registry-server" Jan 31 17:15:00 crc kubenswrapper[4769]: I0131 17:15:00.155281 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29497995-x4npl" Jan 31 17:15:00 crc kubenswrapper[4769]: I0131 17:15:00.164031 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 31 17:15:00 crc kubenswrapper[4769]: I0131 17:15:00.164254 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 31 17:15:00 crc kubenswrapper[4769]: I0131 17:15:00.167674 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29497995-x4npl"] Jan 31 17:15:00 crc kubenswrapper[4769]: I0131 17:15:00.278525 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/29aed3c0-cd7e-4b3c-9d29-ff2544b10026-secret-volume\") pod \"collect-profiles-29497995-x4npl\" (UID: \"29aed3c0-cd7e-4b3c-9d29-ff2544b10026\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29497995-x4npl" Jan 31 17:15:00 crc kubenswrapper[4769]: I0131 17:15:00.278588 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/29aed3c0-cd7e-4b3c-9d29-ff2544b10026-config-volume\") pod \"collect-profiles-29497995-x4npl\" (UID: \"29aed3c0-cd7e-4b3c-9d29-ff2544b10026\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29497995-x4npl" Jan 31 17:15:00 crc kubenswrapper[4769]: I0131 17:15:00.278619 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bwvcd\" (UniqueName: \"kubernetes.io/projected/29aed3c0-cd7e-4b3c-9d29-ff2544b10026-kube-api-access-bwvcd\") pod \"collect-profiles-29497995-x4npl\" (UID: \"29aed3c0-cd7e-4b3c-9d29-ff2544b10026\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29497995-x4npl" Jan 31 17:15:00 crc kubenswrapper[4769]: I0131 17:15:00.380772 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/29aed3c0-cd7e-4b3c-9d29-ff2544b10026-secret-volume\") pod \"collect-profiles-29497995-x4npl\" (UID: \"29aed3c0-cd7e-4b3c-9d29-ff2544b10026\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29497995-x4npl" Jan 31 17:15:00 crc kubenswrapper[4769]: I0131 17:15:00.380830 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/29aed3c0-cd7e-4b3c-9d29-ff2544b10026-config-volume\") pod \"collect-profiles-29497995-x4npl\" (UID: \"29aed3c0-cd7e-4b3c-9d29-ff2544b10026\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29497995-x4npl" Jan 31 17:15:00 crc kubenswrapper[4769]: I0131 17:15:00.380869 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bwvcd\" (UniqueName: \"kubernetes.io/projected/29aed3c0-cd7e-4b3c-9d29-ff2544b10026-kube-api-access-bwvcd\") pod \"collect-profiles-29497995-x4npl\" (UID: \"29aed3c0-cd7e-4b3c-9d29-ff2544b10026\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29497995-x4npl" Jan 31 17:15:00 crc kubenswrapper[4769]: I0131 17:15:00.382865 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/29aed3c0-cd7e-4b3c-9d29-ff2544b10026-config-volume\") pod 
\"collect-profiles-29497995-x4npl\" (UID: \"29aed3c0-cd7e-4b3c-9d29-ff2544b10026\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29497995-x4npl" Jan 31 17:15:00 crc kubenswrapper[4769]: I0131 17:15:00.388770 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/29aed3c0-cd7e-4b3c-9d29-ff2544b10026-secret-volume\") pod \"collect-profiles-29497995-x4npl\" (UID: \"29aed3c0-cd7e-4b3c-9d29-ff2544b10026\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29497995-x4npl" Jan 31 17:15:00 crc kubenswrapper[4769]: I0131 17:15:00.411906 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bwvcd\" (UniqueName: \"kubernetes.io/projected/29aed3c0-cd7e-4b3c-9d29-ff2544b10026-kube-api-access-bwvcd\") pod \"collect-profiles-29497995-x4npl\" (UID: \"29aed3c0-cd7e-4b3c-9d29-ff2544b10026\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29497995-x4npl" Jan 31 17:15:00 crc kubenswrapper[4769]: I0131 17:15:00.485056 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29497995-x4npl" Jan 31 17:15:00 crc kubenswrapper[4769]: I0131 17:15:00.983855 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29497995-x4npl"] Jan 31 17:15:01 crc kubenswrapper[4769]: I0131 17:15:01.909391 4769 generic.go:334] "Generic (PLEG): container finished" podID="29aed3c0-cd7e-4b3c-9d29-ff2544b10026" containerID="c4fd420ee8a2efd86f62dd0add3cb74bf75c2158401957db33bdb24244f5646a" exitCode=0 Jan 31 17:15:01 crc kubenswrapper[4769]: I0131 17:15:01.909765 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29497995-x4npl" event={"ID":"29aed3c0-cd7e-4b3c-9d29-ff2544b10026","Type":"ContainerDied","Data":"c4fd420ee8a2efd86f62dd0add3cb74bf75c2158401957db33bdb24244f5646a"} Jan 31 17:15:01 crc kubenswrapper[4769]: I0131 17:15:01.909799 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29497995-x4npl" event={"ID":"29aed3c0-cd7e-4b3c-9d29-ff2544b10026","Type":"ContainerStarted","Data":"6cd8c21e4266d6f6bcefdf7fe390c3049c12cfbaf772df10a78ccca9ed78648e"} Jan 31 17:15:01 crc kubenswrapper[4769]: I0131 17:15:01.917128 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerDied","Data":"0e5e5a5d761db04efdc8a34375d7448ec177789e0842ab95390ff58a38850fa6"} Jan 31 17:15:01 crc kubenswrapper[4769]: I0131 17:15:01.917063 4769 generic.go:334] "Generic (PLEG): container finished" podID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" containerID="0e5e5a5d761db04efdc8a34375d7448ec177789e0842ab95390ff58a38850fa6" exitCode=1 Jan 31 17:15:01 crc kubenswrapper[4769]: I0131 17:15:01.917192 4769 scope.go:117] "RemoveContainer" containerID="d6f9b1264de3eafed59f6d3cd753bb0fb6a800593dd28fa0e8b4d364827e1f30" Jan 31 17:15:01 crc kubenswrapper[4769]: I0131 17:15:01.917824 4769 scope.go:117] "RemoveContainer" containerID="014b73c8ef938632d42113db88d83d9c9af346c48d23501738b4fe440f72b44f" Jan 31 17:15:01 crc kubenswrapper[4769]: I0131 17:15:01.917894 4769 scope.go:117] "RemoveContainer" containerID="6cf22fe5662840a8c5cb4e5929a4fb93dc807d17766ba53f099e7b3a63f63ef8" Jan 31 17:15:01 crc kubenswrapper[4769]: I0131 17:15:01.917927 4769 scope.go:117] "RemoveContainer" 
containerID="b05cfd3993f9609da7c0b1896e627e3b448ea96c8859fbfbb4e6331cd4667933" Jan 31 17:15:01 crc kubenswrapper[4769]: I0131 17:15:01.917974 4769 scope.go:117] "RemoveContainer" containerID="0e5e5a5d761db04efdc8a34375d7448ec177789e0842ab95390ff58a38850fa6" Jan 31 17:15:01 crc kubenswrapper[4769]: I0131 17:15:01.917991 4769 scope.go:117] "RemoveContainer" containerID="02962f2b97310c8a4c458064c36a004fdb8bf734f79a6cefa9903589110e2751" Jan 31 17:15:01 crc kubenswrapper[4769]: E0131 17:15:01.918322 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 20s restarting failed container=container-updater pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 20s restarting failed container=object-updater pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:15:02 crc kubenswrapper[4769]: I0131 17:15:02.712636 4769 scope.go:117] "RemoveContainer" containerID="8b255e9b67d1ed575196b0f86f7c7a9b343f002ed40dfb1d715798e0a6d44926" Jan 31 17:15:02 crc kubenswrapper[4769]: I0131 17:15:02.712666 4769 scope.go:117] "RemoveContainer" containerID="6c0530e866cc91260b9953bf631bfbceeff9767fb372acc5471afcaeb5dee739" Jan 31 17:15:02 crc kubenswrapper[4769]: E0131 17:15:02.712894 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:15:03 crc kubenswrapper[4769]: I0131 17:15:03.314883 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29497995-x4npl" Jan 31 17:15:03 crc kubenswrapper[4769]: I0131 17:15:03.424590 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/29aed3c0-cd7e-4b3c-9d29-ff2544b10026-secret-volume\") pod \"29aed3c0-cd7e-4b3c-9d29-ff2544b10026\" (UID: \"29aed3c0-cd7e-4b3c-9d29-ff2544b10026\") " Jan 31 17:15:03 crc kubenswrapper[4769]: I0131 17:15:03.424927 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/29aed3c0-cd7e-4b3c-9d29-ff2544b10026-config-volume\") pod \"29aed3c0-cd7e-4b3c-9d29-ff2544b10026\" (UID: \"29aed3c0-cd7e-4b3c-9d29-ff2544b10026\") " Jan 31 17:15:03 crc kubenswrapper[4769]: I0131 17:15:03.425008 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bwvcd\" (UniqueName: \"kubernetes.io/projected/29aed3c0-cd7e-4b3c-9d29-ff2544b10026-kube-api-access-bwvcd\") pod \"29aed3c0-cd7e-4b3c-9d29-ff2544b10026\" (UID: \"29aed3c0-cd7e-4b3c-9d29-ff2544b10026\") " Jan 31 17:15:03 crc kubenswrapper[4769]: I0131 17:15:03.425637 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/29aed3c0-cd7e-4b3c-9d29-ff2544b10026-config-volume" (OuterVolumeSpecName: "config-volume") pod "29aed3c0-cd7e-4b3c-9d29-ff2544b10026" (UID: "29aed3c0-cd7e-4b3c-9d29-ff2544b10026"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 31 17:15:03 crc kubenswrapper[4769]: I0131 17:15:03.430245 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29aed3c0-cd7e-4b3c-9d29-ff2544b10026-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "29aed3c0-cd7e-4b3c-9d29-ff2544b10026" (UID: "29aed3c0-cd7e-4b3c-9d29-ff2544b10026"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 31 17:15:03 crc kubenswrapper[4769]: I0131 17:15:03.430259 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29aed3c0-cd7e-4b3c-9d29-ff2544b10026-kube-api-access-bwvcd" (OuterVolumeSpecName: "kube-api-access-bwvcd") pod "29aed3c0-cd7e-4b3c-9d29-ff2544b10026" (UID: "29aed3c0-cd7e-4b3c-9d29-ff2544b10026"). InnerVolumeSpecName "kube-api-access-bwvcd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 17:15:03 crc kubenswrapper[4769]: I0131 17:15:03.527407 4769 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/29aed3c0-cd7e-4b3c-9d29-ff2544b10026-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 31 17:15:03 crc kubenswrapper[4769]: I0131 17:15:03.527459 4769 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/29aed3c0-cd7e-4b3c-9d29-ff2544b10026-config-volume\") on node \"crc\" DevicePath \"\"" Jan 31 17:15:03 crc kubenswrapper[4769]: I0131 17:15:03.527481 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bwvcd\" (UniqueName: \"kubernetes.io/projected/29aed3c0-cd7e-4b3c-9d29-ff2544b10026-kube-api-access-bwvcd\") on node \"crc\" DevicePath \"\"" Jan 31 17:15:03 crc kubenswrapper[4769]: I0131 17:15:03.939175 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29497995-x4npl" event={"ID":"29aed3c0-cd7e-4b3c-9d29-ff2544b10026","Type":"ContainerDied","Data":"6cd8c21e4266d6f6bcefdf7fe390c3049c12cfbaf772df10a78ccca9ed78648e"} Jan 31 17:15:03 crc kubenswrapper[4769]: I0131 17:15:03.939226 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6cd8c21e4266d6f6bcefdf7fe390c3049c12cfbaf772df10a78ccca9ed78648e" Jan 31 17:15:03 crc kubenswrapper[4769]: I0131 17:15:03.939282 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29497995-x4npl" Jan 31 17:15:04 crc kubenswrapper[4769]: I0131 17:15:04.375029 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29497950-gf6hx"] Jan 31 17:15:04 crc kubenswrapper[4769]: I0131 17:15:04.381959 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29497950-gf6hx"] Jan 31 17:15:04 crc kubenswrapper[4769]: I0131 17:15:04.741700 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aecc20b3-16e2-4d56-93ec-2c62b4a45e56" path="/var/lib/kubelet/pods/aecc20b3-16e2-4d56-93ec-2c62b4a45e56/volumes" Jan 31 17:15:05 crc kubenswrapper[4769]: I0131 17:15:05.968323 4769 generic.go:334] "Generic (PLEG): container finished" podID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" containerID="e7458c7a973316d933083196f2ae04bbc7c0b445b16ad8bab77c5a38d93d145c" exitCode=1 Jan 31 17:15:05 crc kubenswrapper[4769]: I0131 17:15:05.968511 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerDied","Data":"e7458c7a973316d933083196f2ae04bbc7c0b445b16ad8bab77c5a38d93d145c"} Jan 31 17:15:05 crc kubenswrapper[4769]: I0131 17:15:05.968616 4769 scope.go:117] "RemoveContainer" containerID="cf3815edf78281d41b22fdba9f12ad0237e640690e8100d80b38c9175962d790" Jan 31 17:15:05 crc kubenswrapper[4769]: I0131 17:15:05.969324 4769 scope.go:117] "RemoveContainer" containerID="634222e06565f9a8960db793ad16c4794ee090ac4d43ace186f3daf20b881cd9" Jan 31 17:15:05 crc kubenswrapper[4769]: I0131 17:15:05.969400 4769 scope.go:117] "RemoveContainer" containerID="0dd9545c12ba7121dbfacc14fa99efe6c52da5daa3b0c89a71e5af63f8c99eda" Jan 31 17:15:05 crc kubenswrapper[4769]: I0131 17:15:05.969429 4769 scope.go:117] "RemoveContainer" 
containerID="e7458c7a973316d933083196f2ae04bbc7c0b445b16ad8bab77c5a38d93d145c" Jan 31 17:15:05 crc kubenswrapper[4769]: I0131 17:15:05.969583 4769 scope.go:117] "RemoveContainer" containerID="24a94dae6f7672de022836fe383e9a1df16a07423b4ffed3c01db43a4d952d98" Jan 31 17:15:05 crc kubenswrapper[4769]: E0131 17:15:05.969948 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 10s restarting failed container=container-updater pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:15:07 crc kubenswrapper[4769]: I0131 17:15:07.496858 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-4fs84"] Jan 31 17:15:07 crc kubenswrapper[4769]: E0131 17:15:07.497634 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29aed3c0-cd7e-4b3c-9d29-ff2544b10026" containerName="collect-profiles" Jan 31 17:15:07 crc kubenswrapper[4769]: I0131 17:15:07.497654 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="29aed3c0-cd7e-4b3c-9d29-ff2544b10026" containerName="collect-profiles" Jan 31 17:15:07 crc kubenswrapper[4769]: I0131 17:15:07.498001 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="29aed3c0-cd7e-4b3c-9d29-ff2544b10026" containerName="collect-profiles" Jan 31 17:15:07 crc kubenswrapper[4769]: I0131 17:15:07.499940 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-4fs84" Jan 31 17:15:07 crc kubenswrapper[4769]: I0131 17:15:07.515772 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4fs84"] Jan 31 17:15:07 crc kubenswrapper[4769]: I0131 17:15:07.589363 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94016076-ac13-4d38-8a43-6cc6fea39577-utilities\") pod \"redhat-operators-4fs84\" (UID: \"94016076-ac13-4d38-8a43-6cc6fea39577\") " pod="openshift-marketplace/redhat-operators-4fs84" Jan 31 17:15:07 crc kubenswrapper[4769]: I0131 17:15:07.589418 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94016076-ac13-4d38-8a43-6cc6fea39577-catalog-content\") pod \"redhat-operators-4fs84\" (UID: \"94016076-ac13-4d38-8a43-6cc6fea39577\") " pod="openshift-marketplace/redhat-operators-4fs84" Jan 31 17:15:07 crc kubenswrapper[4769]: I0131 17:15:07.589677 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8hdrt\" (UniqueName: \"kubernetes.io/projected/94016076-ac13-4d38-8a43-6cc6fea39577-kube-api-access-8hdrt\") pod \"redhat-operators-4fs84\" (UID: \"94016076-ac13-4d38-8a43-6cc6fea39577\") " pod="openshift-marketplace/redhat-operators-4fs84" Jan 31 17:15:07 crc kubenswrapper[4769]: I0131 17:15:07.690914 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94016076-ac13-4d38-8a43-6cc6fea39577-utilities\") pod \"redhat-operators-4fs84\" (UID: \"94016076-ac13-4d38-8a43-6cc6fea39577\") " pod="openshift-marketplace/redhat-operators-4fs84" Jan 31 17:15:07 crc kubenswrapper[4769]: I0131 17:15:07.690973 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94016076-ac13-4d38-8a43-6cc6fea39577-catalog-content\") pod \"redhat-operators-4fs84\" (UID: \"94016076-ac13-4d38-8a43-6cc6fea39577\") " pod="openshift-marketplace/redhat-operators-4fs84" Jan 31 17:15:07 crc kubenswrapper[4769]: I0131 17:15:07.691067 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8hdrt\" (UniqueName: \"kubernetes.io/projected/94016076-ac13-4d38-8a43-6cc6fea39577-kube-api-access-8hdrt\") pod \"redhat-operators-4fs84\" (UID: \"94016076-ac13-4d38-8a43-6cc6fea39577\") " pod="openshift-marketplace/redhat-operators-4fs84" Jan 31 17:15:07 crc kubenswrapper[4769]: I0131 17:15:07.691663 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94016076-ac13-4d38-8a43-6cc6fea39577-catalog-content\") pod \"redhat-operators-4fs84\" (UID: \"94016076-ac13-4d38-8a43-6cc6fea39577\") " pod="openshift-marketplace/redhat-operators-4fs84" Jan 31 17:15:07 crc kubenswrapper[4769]: I0131 17:15:07.691734 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94016076-ac13-4d38-8a43-6cc6fea39577-utilities\") pod \"redhat-operators-4fs84\" (UID: \"94016076-ac13-4d38-8a43-6cc6fea39577\") " pod="openshift-marketplace/redhat-operators-4fs84" Jan 31 17:15:07 crc kubenswrapper[4769]: I0131 17:15:07.709662 4769 scope.go:117] "RemoveContainer" 
containerID="c0adeff93092159437f56d3a16d5a487149aa20a20f6d0f1c321bb6c899e34c2" Jan 31 17:15:07 crc kubenswrapper[4769]: I0131 17:15:07.709756 4769 scope.go:117] "RemoveContainer" containerID="9dbd9a0ad20346e4c64377f2a67c6289980b41414e80aaee45dc779649f3ac86" Jan 31 17:15:07 crc kubenswrapper[4769]: I0131 17:15:07.709787 4769 scope.go:117] "RemoveContainer" containerID="e02510ae3cb1cffb2ecbbc81af17dca00f092b778dff22277e474bd53c8f2cc7" Jan 31 17:15:07 crc kubenswrapper[4769]: I0131 17:15:07.709863 4769 scope.go:117] "RemoveContainer" containerID="b5a2131b280435817997df4440d40e8bfe82db2f050869abe88ef581ee288329" Jan 31 17:15:07 crc kubenswrapper[4769]: I0131 17:15:07.709941 4769 scope.go:117] "RemoveContainer" containerID="9e0bc3e16231f9b53fb4ea1d60f9528601c7abd064da24494465904562243b24" Jan 31 17:15:07 crc kubenswrapper[4769]: E0131 17:15:07.710292 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:15:07 crc kubenswrapper[4769]: I0131 17:15:07.714128 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8hdrt\" (UniqueName: \"kubernetes.io/projected/94016076-ac13-4d38-8a43-6cc6fea39577-kube-api-access-8hdrt\") pod \"redhat-operators-4fs84\" (UID: \"94016076-ac13-4d38-8a43-6cc6fea39577\") " pod="openshift-marketplace/redhat-operators-4fs84" Jan 31 17:15:07 crc kubenswrapper[4769]: I0131 17:15:07.842346 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-4fs84" Jan 31 17:15:08 crc kubenswrapper[4769]: I0131 17:15:08.081036 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4fs84"] Jan 31 17:15:09 crc kubenswrapper[4769]: I0131 17:15:09.006615 4769 generic.go:334] "Generic (PLEG): container finished" podID="94016076-ac13-4d38-8a43-6cc6fea39577" containerID="fcb47e1b95bd23ee6e8d25d16da4fcd90919c129a9f607b62b1821c8d13b8eb6" exitCode=0 Jan 31 17:15:09 crc kubenswrapper[4769]: I0131 17:15:09.006686 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4fs84" event={"ID":"94016076-ac13-4d38-8a43-6cc6fea39577","Type":"ContainerDied","Data":"fcb47e1b95bd23ee6e8d25d16da4fcd90919c129a9f607b62b1821c8d13b8eb6"} Jan 31 17:15:09 crc kubenswrapper[4769]: I0131 17:15:09.007027 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4fs84" event={"ID":"94016076-ac13-4d38-8a43-6cc6fea39577","Type":"ContainerStarted","Data":"72ad4a1ed59ec68620f080ae0df8bc5699c9ccd4808601340d98cafca426f369"} Jan 31 17:15:10 crc kubenswrapper[4769]: I0131 17:15:10.015625 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4fs84" event={"ID":"94016076-ac13-4d38-8a43-6cc6fea39577","Type":"ContainerStarted","Data":"24a64afd4a5bbf529ad5ce09b40a6f1a313acbadc1b73a94aad6ed671994e7c0"} Jan 31 17:15:11 crc kubenswrapper[4769]: I0131 17:15:11.027914 4769 generic.go:334] "Generic (PLEG): container finished" podID="94016076-ac13-4d38-8a43-6cc6fea39577" containerID="24a64afd4a5bbf529ad5ce09b40a6f1a313acbadc1b73a94aad6ed671994e7c0" exitCode=0 Jan 31 17:15:11 crc kubenswrapper[4769]: I0131 17:15:11.027981 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4fs84" event={"ID":"94016076-ac13-4d38-8a43-6cc6fea39577","Type":"ContainerDied","Data":"24a64afd4a5bbf529ad5ce09b40a6f1a313acbadc1b73a94aad6ed671994e7c0"} Jan 31 17:15:12 crc kubenswrapper[4769]: I0131 17:15:12.038978 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4fs84" event={"ID":"94016076-ac13-4d38-8a43-6cc6fea39577","Type":"ContainerStarted","Data":"84c0072f78c62477a2dde0bdbb6a1b831c59f56589538cfbad208bc13b0188bd"} Jan 31 17:15:12 crc kubenswrapper[4769]: I0131 17:15:12.069711 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-4fs84" podStartSLOduration=2.6741307880000003 podStartE2EDuration="5.069690977s" podCreationTimestamp="2026-01-31 17:15:07 +0000 UTC" firstStartedPulling="2026-01-31 17:15:09.009044421 +0000 UTC m=+2757.083213100" lastFinishedPulling="2026-01-31 17:15:11.40460458 +0000 UTC m=+2759.478773289" observedRunningTime="2026-01-31 17:15:12.059239406 +0000 UTC m=+2760.133408165" watchObservedRunningTime="2026-01-31 17:15:12.069690977 +0000 UTC m=+2760.143859646" Jan 31 17:15:12 crc kubenswrapper[4769]: I0131 17:15:12.738015 4769 scope.go:117] "RemoveContainer" containerID="014b73c8ef938632d42113db88d83d9c9af346c48d23501738b4fe440f72b44f" Jan 31 17:15:12 crc kubenswrapper[4769]: I0131 17:15:12.738106 4769 scope.go:117] "RemoveContainer" containerID="6cf22fe5662840a8c5cb4e5929a4fb93dc807d17766ba53f099e7b3a63f63ef8" Jan 31 17:15:12 crc kubenswrapper[4769]: I0131 17:15:12.738128 4769 scope.go:117] "RemoveContainer" containerID="b05cfd3993f9609da7c0b1896e627e3b448ea96c8859fbfbb4e6331cd4667933" Jan 31 17:15:12 crc 
kubenswrapper[4769]: I0131 17:15:12.738245 4769 scope.go:117] "RemoveContainer" containerID="0e5e5a5d761db04efdc8a34375d7448ec177789e0842ab95390ff58a38850fa6" Jan 31 17:15:12 crc kubenswrapper[4769]: I0131 17:15:12.738253 4769 scope.go:117] "RemoveContainer" containerID="02962f2b97310c8a4c458064c36a004fdb8bf734f79a6cefa9903589110e2751" Jan 31 17:15:12 crc kubenswrapper[4769]: E0131 17:15:12.891954 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 20s restarting failed container=object-updater pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:15:13 crc kubenswrapper[4769]: I0131 17:15:13.058258 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerStarted","Data":"b0862079610db9d5f62f67aa455181facb401ffa066642d56a42e08534f14464"} Jan 31 17:15:13 crc kubenswrapper[4769]: I0131 17:15:13.059204 4769 scope.go:117] "RemoveContainer" containerID="014b73c8ef938632d42113db88d83d9c9af346c48d23501738b4fe440f72b44f" Jan 31 17:15:13 crc kubenswrapper[4769]: I0131 17:15:13.059265 4769 scope.go:117] "RemoveContainer" containerID="6cf22fe5662840a8c5cb4e5929a4fb93dc807d17766ba53f099e7b3a63f63ef8" Jan 31 17:15:13 crc kubenswrapper[4769]: I0131 17:15:13.059339 4769 scope.go:117] "RemoveContainer" containerID="0e5e5a5d761db04efdc8a34375d7448ec177789e0842ab95390ff58a38850fa6" Jan 31 17:15:13 crc kubenswrapper[4769]: I0131 17:15:13.059348 4769 scope.go:117] "RemoveContainer" containerID="02962f2b97310c8a4c458064c36a004fdb8bf734f79a6cefa9903589110e2751" Jan 31 17:15:13 crc kubenswrapper[4769]: E0131 17:15:13.059654 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 20s restarting failed container=object-updater pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:15:14 crc 
kubenswrapper[4769]: I0131 17:15:14.406157 4769 scope.go:117] "RemoveContainer" containerID="76943742668c5c20bee77cf85f46bc951d40d6b1d5ecd9c3720b69e068175428" Jan 31 17:15:14 crc kubenswrapper[4769]: I0131 17:15:14.708749 4769 scope.go:117] "RemoveContainer" containerID="8b255e9b67d1ed575196b0f86f7c7a9b343f002ed40dfb1d715798e0a6d44926" Jan 31 17:15:14 crc kubenswrapper[4769]: I0131 17:15:14.709101 4769 scope.go:117] "RemoveContainer" containerID="6c0530e866cc91260b9953bf631bfbceeff9767fb372acc5471afcaeb5dee739" Jan 31 17:15:14 crc kubenswrapper[4769]: E0131 17:15:14.709487 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:15:17 crc kubenswrapper[4769]: I0131 17:15:17.708923 4769 scope.go:117] "RemoveContainer" containerID="634222e06565f9a8960db793ad16c4794ee090ac4d43ace186f3daf20b881cd9" Jan 31 17:15:17 crc kubenswrapper[4769]: I0131 17:15:17.709005 4769 scope.go:117] "RemoveContainer" containerID="0dd9545c12ba7121dbfacc14fa99efe6c52da5daa3b0c89a71e5af63f8c99eda" Jan 31 17:15:17 crc kubenswrapper[4769]: I0131 17:15:17.709025 4769 scope.go:117] "RemoveContainer" containerID="e7458c7a973316d933083196f2ae04bbc7c0b445b16ad8bab77c5a38d93d145c" Jan 31 17:15:17 crc kubenswrapper[4769]: I0131 17:15:17.709083 4769 scope.go:117] "RemoveContainer" containerID="24a94dae6f7672de022836fe383e9a1df16a07423b4ffed3c01db43a4d952d98" Jan 31 17:15:17 crc kubenswrapper[4769]: I0131 17:15:17.842653 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-4fs84" Jan 31 17:15:17 crc kubenswrapper[4769]: I0131 17:15:17.842702 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-4fs84" Jan 31 17:15:17 crc kubenswrapper[4769]: E0131 17:15:17.921862 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:15:18 crc kubenswrapper[4769]: I0131 17:15:18.139162 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerStarted","Data":"1b75bb5aaeeefb7a7abaa985fce7c388ff1391793e0add450800b7d3c4c5b861"} Jan 31 17:15:18 crc kubenswrapper[4769]: I0131 17:15:18.139714 4769 scope.go:117] "RemoveContainer" 
containerID="634222e06565f9a8960db793ad16c4794ee090ac4d43ace186f3daf20b881cd9" Jan 31 17:15:18 crc kubenswrapper[4769]: I0131 17:15:18.139772 4769 scope.go:117] "RemoveContainer" containerID="0dd9545c12ba7121dbfacc14fa99efe6c52da5daa3b0c89a71e5af63f8c99eda" Jan 31 17:15:18 crc kubenswrapper[4769]: I0131 17:15:18.139854 4769 scope.go:117] "RemoveContainer" containerID="24a94dae6f7672de022836fe383e9a1df16a07423b4ffed3c01db43a4d952d98" Jan 31 17:15:18 crc kubenswrapper[4769]: E0131 17:15:18.140108 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:15:18 crc kubenswrapper[4769]: I0131 17:15:18.888971 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-4fs84" podUID="94016076-ac13-4d38-8a43-6cc6fea39577" containerName="registry-server" probeResult="failure" output=< Jan 31 17:15:18 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Jan 31 17:15:18 crc kubenswrapper[4769]: > Jan 31 17:15:20 crc kubenswrapper[4769]: I0131 17:15:20.681819 4769 patch_prober.go:28] interesting pod/machine-config-daemon-4bqbm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 31 17:15:20 crc kubenswrapper[4769]: I0131 17:15:20.681915 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 31 17:15:20 crc kubenswrapper[4769]: I0131 17:15:20.709487 4769 scope.go:117] "RemoveContainer" containerID="c0adeff93092159437f56d3a16d5a487149aa20a20f6d0f1c321bb6c899e34c2" Jan 31 17:15:20 crc kubenswrapper[4769]: I0131 17:15:20.709692 4769 scope.go:117] "RemoveContainer" containerID="9dbd9a0ad20346e4c64377f2a67c6289980b41414e80aaee45dc779649f3ac86" Jan 31 17:15:20 crc kubenswrapper[4769]: I0131 17:15:20.709740 4769 scope.go:117] "RemoveContainer" containerID="e02510ae3cb1cffb2ecbbc81af17dca00f092b778dff22277e474bd53c8f2cc7" Jan 31 17:15:20 crc kubenswrapper[4769]: I0131 17:15:20.709922 4769 scope.go:117] "RemoveContainer" containerID="b5a2131b280435817997df4440d40e8bfe82db2f050869abe88ef581ee288329" Jan 31 17:15:20 crc kubenswrapper[4769]: I0131 17:15:20.710004 4769 scope.go:117] "RemoveContainer" containerID="9e0bc3e16231f9b53fb4ea1d60f9528601c7abd064da24494465904562243b24" Jan 31 17:15:20 crc kubenswrapper[4769]: E0131 17:15:20.892551 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:15:21 crc kubenswrapper[4769]: I0131 17:15:21.171379 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerStarted","Data":"85f1f039507b8b306801c6bb4ff81c81e24ef3782c3aa1af52cb2accc8cbf579"} Jan 31 17:15:21 crc kubenswrapper[4769]: I0131 17:15:21.172245 4769 scope.go:117] "RemoveContainer" containerID="c0adeff93092159437f56d3a16d5a487149aa20a20f6d0f1c321bb6c899e34c2" Jan 31 17:15:21 crc kubenswrapper[4769]: I0131 17:15:21.172353 4769 scope.go:117] "RemoveContainer" containerID="9dbd9a0ad20346e4c64377f2a67c6289980b41414e80aaee45dc779649f3ac86" Jan 31 17:15:21 crc kubenswrapper[4769]: I0131 17:15:21.172540 4769 scope.go:117] "RemoveContainer" containerID="b5a2131b280435817997df4440d40e8bfe82db2f050869abe88ef581ee288329" Jan 31 17:15:21 crc kubenswrapper[4769]: I0131 17:15:21.172613 4769 scope.go:117] "RemoveContainer" containerID="9e0bc3e16231f9b53fb4ea1d60f9528601c7abd064da24494465904562243b24" Jan 31 17:15:21 crc kubenswrapper[4769]: E0131 17:15:21.173014 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:15:22 crc kubenswrapper[4769]: I0131 17:15:22.193161 4769 generic.go:334] "Generic (PLEG): container finished" podID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" containerID="70253717258a7c161800c42af8c4f4bd84e9abe68bfbd78aad2db3dae3a77cba" exitCode=1 Jan 31 17:15:22 crc kubenswrapper[4769]: I0131 17:15:22.193298 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" 
event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerDied","Data":"70253717258a7c161800c42af8c4f4bd84e9abe68bfbd78aad2db3dae3a77cba"} Jan 31 17:15:22 crc kubenswrapper[4769]: I0131 17:15:22.194793 4769 scope.go:117] "RemoveContainer" containerID="ee30a33f60a82d2e1dae52bdc8a34b5c68ccb4fb368981ae183e0dea0860570b" Jan 31 17:15:22 crc kubenswrapper[4769]: I0131 17:15:22.195609 4769 scope.go:117] "RemoveContainer" containerID="634222e06565f9a8960db793ad16c4794ee090ac4d43ace186f3daf20b881cd9" Jan 31 17:15:22 crc kubenswrapper[4769]: I0131 17:15:22.195681 4769 scope.go:117] "RemoveContainer" containerID="0dd9545c12ba7121dbfacc14fa99efe6c52da5daa3b0c89a71e5af63f8c99eda" Jan 31 17:15:22 crc kubenswrapper[4769]: I0131 17:15:22.195755 4769 scope.go:117] "RemoveContainer" containerID="70253717258a7c161800c42af8c4f4bd84e9abe68bfbd78aad2db3dae3a77cba" Jan 31 17:15:22 crc kubenswrapper[4769]: I0131 17:15:22.195775 4769 scope.go:117] "RemoveContainer" containerID="24a94dae6f7672de022836fe383e9a1df16a07423b4ffed3c01db43a4d952d98" Jan 31 17:15:22 crc kubenswrapper[4769]: E0131 17:15:22.196090 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 10s restarting failed container=object-updater pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:15:27 crc kubenswrapper[4769]: I0131 17:15:27.924834 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-4fs84" Jan 31 17:15:28 crc kubenswrapper[4769]: I0131 17:15:28.002295 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-4fs84" Jan 31 17:15:28 crc kubenswrapper[4769]: I0131 17:15:28.168337 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4fs84"] Jan 31 17:15:28 crc kubenswrapper[4769]: I0131 17:15:28.709408 4769 scope.go:117] "RemoveContainer" containerID="014b73c8ef938632d42113db88d83d9c9af346c48d23501738b4fe440f72b44f" Jan 31 17:15:28 crc kubenswrapper[4769]: I0131 17:15:28.709951 4769 scope.go:117] "RemoveContainer" containerID="6cf22fe5662840a8c5cb4e5929a4fb93dc807d17766ba53f099e7b3a63f63ef8" Jan 31 17:15:28 crc kubenswrapper[4769]: I0131 17:15:28.710142 4769 scope.go:117] "RemoveContainer" containerID="0e5e5a5d761db04efdc8a34375d7448ec177789e0842ab95390ff58a38850fa6" Jan 31 17:15:28 crc kubenswrapper[4769]: I0131 17:15:28.710177 4769 scope.go:117] "RemoveContainer" containerID="02962f2b97310c8a4c458064c36a004fdb8bf734f79a6cefa9903589110e2751" Jan 31 17:15:28 crc kubenswrapper[4769]: E0131 17:15:28.912662 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for 
\"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:15:29 crc kubenswrapper[4769]: I0131 17:15:29.279676 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerStarted","Data":"682a8e121d8f06e40ccdf4a6fab0fb4c2740c871706d022858131ee2ec27a66b"} Jan 31 17:15:29 crc kubenswrapper[4769]: I0131 17:15:29.280919 4769 scope.go:117] "RemoveContainer" containerID="014b73c8ef938632d42113db88d83d9c9af346c48d23501738b4fe440f72b44f" Jan 31 17:15:29 crc kubenswrapper[4769]: I0131 17:15:29.280994 4769 scope.go:117] "RemoveContainer" containerID="6cf22fe5662840a8c5cb4e5929a4fb93dc807d17766ba53f099e7b3a63f63ef8" Jan 31 17:15:29 crc kubenswrapper[4769]: I0131 17:15:29.281086 4769 scope.go:117] "RemoveContainer" containerID="02962f2b97310c8a4c458064c36a004fdb8bf734f79a6cefa9903589110e2751" Jan 31 17:15:29 crc kubenswrapper[4769]: E0131 17:15:29.281432 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:15:29 crc kubenswrapper[4769]: I0131 17:15:29.288289 4769 generic.go:334] "Generic (PLEG): container finished" podID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" containerID="1b75bb5aaeeefb7a7abaa985fce7c388ff1391793e0add450800b7d3c4c5b861" exitCode=1 Jan 31 17:15:29 crc kubenswrapper[4769]: I0131 17:15:29.288360 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerDied","Data":"1b75bb5aaeeefb7a7abaa985fce7c388ff1391793e0add450800b7d3c4c5b861"} Jan 31 17:15:29 crc kubenswrapper[4769]: I0131 17:15:29.288530 4769 scope.go:117] "RemoveContainer" containerID="e7458c7a973316d933083196f2ae04bbc7c0b445b16ad8bab77c5a38d93d145c" Jan 31 17:15:29 crc kubenswrapper[4769]: I0131 17:15:29.288991 4769 scope.go:117] "RemoveContainer" containerID="634222e06565f9a8960db793ad16c4794ee090ac4d43ace186f3daf20b881cd9" Jan 31 17:15:29 crc kubenswrapper[4769]: I0131 17:15:29.289020 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-4fs84" 
podUID="94016076-ac13-4d38-8a43-6cc6fea39577" containerName="registry-server" containerID="cri-o://84c0072f78c62477a2dde0bdbb6a1b831c59f56589538cfbad208bc13b0188bd" gracePeriod=2 Jan 31 17:15:29 crc kubenswrapper[4769]: I0131 17:15:29.289141 4769 scope.go:117] "RemoveContainer" containerID="0dd9545c12ba7121dbfacc14fa99efe6c52da5daa3b0c89a71e5af63f8c99eda" Jan 31 17:15:29 crc kubenswrapper[4769]: I0131 17:15:29.289188 4769 scope.go:117] "RemoveContainer" containerID="1b75bb5aaeeefb7a7abaa985fce7c388ff1391793e0add450800b7d3c4c5b861" Jan 31 17:15:29 crc kubenswrapper[4769]: I0131 17:15:29.289252 4769 scope.go:117] "RemoveContainer" containerID="70253717258a7c161800c42af8c4f4bd84e9abe68bfbd78aad2db3dae3a77cba" Jan 31 17:15:29 crc kubenswrapper[4769]: I0131 17:15:29.289260 4769 scope.go:117] "RemoveContainer" containerID="24a94dae6f7672de022836fe383e9a1df16a07423b4ffed3c01db43a4d952d98" Jan 31 17:15:29 crc kubenswrapper[4769]: E0131 17:15:29.289796 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 20s restarting failed container=container-updater pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 10s restarting failed container=object-updater pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:15:29 crc kubenswrapper[4769]: I0131 17:15:29.708516 4769 scope.go:117] "RemoveContainer" containerID="8b255e9b67d1ed575196b0f86f7c7a9b343f002ed40dfb1d715798e0a6d44926" Jan 31 17:15:29 crc kubenswrapper[4769]: I0131 17:15:29.708743 4769 scope.go:117] "RemoveContainer" containerID="6c0530e866cc91260b9953bf631bfbceeff9767fb372acc5471afcaeb5dee739" Jan 31 17:15:29 crc kubenswrapper[4769]: I0131 17:15:29.726142 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-4fs84" Jan 31 17:15:29 crc kubenswrapper[4769]: E0131 17:15:29.860140 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:15:29 crc kubenswrapper[4769]: I0131 17:15:29.888779 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94016076-ac13-4d38-8a43-6cc6fea39577-utilities\") pod \"94016076-ac13-4d38-8a43-6cc6fea39577\" (UID: \"94016076-ac13-4d38-8a43-6cc6fea39577\") " Jan 31 17:15:29 crc kubenswrapper[4769]: I0131 17:15:29.888898 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8hdrt\" (UniqueName: \"kubernetes.io/projected/94016076-ac13-4d38-8a43-6cc6fea39577-kube-api-access-8hdrt\") pod \"94016076-ac13-4d38-8a43-6cc6fea39577\" (UID: \"94016076-ac13-4d38-8a43-6cc6fea39577\") " Jan 31 17:15:29 crc kubenswrapper[4769]: I0131 17:15:29.888944 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94016076-ac13-4d38-8a43-6cc6fea39577-catalog-content\") pod \"94016076-ac13-4d38-8a43-6cc6fea39577\" (UID: \"94016076-ac13-4d38-8a43-6cc6fea39577\") " Jan 31 17:15:29 crc kubenswrapper[4769]: I0131 17:15:29.889983 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/94016076-ac13-4d38-8a43-6cc6fea39577-utilities" (OuterVolumeSpecName: "utilities") pod "94016076-ac13-4d38-8a43-6cc6fea39577" (UID: "94016076-ac13-4d38-8a43-6cc6fea39577"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 17:15:29 crc kubenswrapper[4769]: I0131 17:15:29.893934 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/94016076-ac13-4d38-8a43-6cc6fea39577-kube-api-access-8hdrt" (OuterVolumeSpecName: "kube-api-access-8hdrt") pod "94016076-ac13-4d38-8a43-6cc6fea39577" (UID: "94016076-ac13-4d38-8a43-6cc6fea39577"). InnerVolumeSpecName "kube-api-access-8hdrt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 17:15:29 crc kubenswrapper[4769]: I0131 17:15:29.991363 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8hdrt\" (UniqueName: \"kubernetes.io/projected/94016076-ac13-4d38-8a43-6cc6fea39577-kube-api-access-8hdrt\") on node \"crc\" DevicePath \"\"" Jan 31 17:15:29 crc kubenswrapper[4769]: I0131 17:15:29.991417 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94016076-ac13-4d38-8a43-6cc6fea39577-utilities\") on node \"crc\" DevicePath \"\"" Jan 31 17:15:30 crc kubenswrapper[4769]: I0131 17:15:30.007190 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/94016076-ac13-4d38-8a43-6cc6fea39577-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "94016076-ac13-4d38-8a43-6cc6fea39577" (UID: "94016076-ac13-4d38-8a43-6cc6fea39577"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 17:15:30 crc kubenswrapper[4769]: I0131 17:15:30.092604 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94016076-ac13-4d38-8a43-6cc6fea39577-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 31 17:15:30 crc kubenswrapper[4769]: I0131 17:15:30.308241 4769 generic.go:334] "Generic (PLEG): container finished" podID="94016076-ac13-4d38-8a43-6cc6fea39577" containerID="84c0072f78c62477a2dde0bdbb6a1b831c59f56589538cfbad208bc13b0188bd" exitCode=0 Jan 31 17:15:30 crc kubenswrapper[4769]: I0131 17:15:30.308306 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4fs84" event={"ID":"94016076-ac13-4d38-8a43-6cc6fea39577","Type":"ContainerDied","Data":"84c0072f78c62477a2dde0bdbb6a1b831c59f56589538cfbad208bc13b0188bd"} Jan 31 17:15:30 crc kubenswrapper[4769]: I0131 17:15:30.308331 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4fs84" event={"ID":"94016076-ac13-4d38-8a43-6cc6fea39577","Type":"ContainerDied","Data":"72ad4a1ed59ec68620f080ae0df8bc5699c9ccd4808601340d98cafca426f369"} Jan 31 17:15:30 crc kubenswrapper[4769]: I0131 17:15:30.308347 4769 scope.go:117] "RemoveContainer" containerID="84c0072f78c62477a2dde0bdbb6a1b831c59f56589538cfbad208bc13b0188bd" Jan 31 17:15:30 crc kubenswrapper[4769]: I0131 17:15:30.308594 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4fs84" Jan 31 17:15:30 crc kubenswrapper[4769]: I0131 17:15:30.335238 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerStarted","Data":"286d46808a0237ce122ecbae5fbd04f959231e4a773112f861deab49c286012a"} Jan 31 17:15:30 crc kubenswrapper[4769]: I0131 17:15:30.335753 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 17:15:30 crc kubenswrapper[4769]: I0131 17:15:30.335865 4769 scope.go:117] "RemoveContainer" containerID="6c0530e866cc91260b9953bf631bfbceeff9767fb372acc5471afcaeb5dee739" Jan 31 17:15:30 crc kubenswrapper[4769]: E0131 17:15:30.336111 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:15:30 crc kubenswrapper[4769]: I0131 17:15:30.345167 4769 scope.go:117] "RemoveContainer" containerID="24a64afd4a5bbf529ad5ce09b40a6f1a313acbadc1b73a94aad6ed671994e7c0" Jan 31 17:15:30 crc kubenswrapper[4769]: I0131 17:15:30.350750 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4fs84"] Jan 31 17:15:30 crc kubenswrapper[4769]: I0131 17:15:30.355283 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-4fs84"] Jan 31 17:15:30 crc kubenswrapper[4769]: I0131 17:15:30.395648 4769 scope.go:117] "RemoveContainer" containerID="fcb47e1b95bd23ee6e8d25d16da4fcd90919c129a9f607b62b1821c8d13b8eb6" Jan 31 17:15:30 crc kubenswrapper[4769]: I0131 17:15:30.423659 4769 scope.go:117] "RemoveContainer" 
containerID="84c0072f78c62477a2dde0bdbb6a1b831c59f56589538cfbad208bc13b0188bd" Jan 31 17:15:30 crc kubenswrapper[4769]: E0131 17:15:30.424070 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"84c0072f78c62477a2dde0bdbb6a1b831c59f56589538cfbad208bc13b0188bd\": container with ID starting with 84c0072f78c62477a2dde0bdbb6a1b831c59f56589538cfbad208bc13b0188bd not found: ID does not exist" containerID="84c0072f78c62477a2dde0bdbb6a1b831c59f56589538cfbad208bc13b0188bd" Jan 31 17:15:30 crc kubenswrapper[4769]: I0131 17:15:30.424102 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"84c0072f78c62477a2dde0bdbb6a1b831c59f56589538cfbad208bc13b0188bd"} err="failed to get container status \"84c0072f78c62477a2dde0bdbb6a1b831c59f56589538cfbad208bc13b0188bd\": rpc error: code = NotFound desc = could not find container \"84c0072f78c62477a2dde0bdbb6a1b831c59f56589538cfbad208bc13b0188bd\": container with ID starting with 84c0072f78c62477a2dde0bdbb6a1b831c59f56589538cfbad208bc13b0188bd not found: ID does not exist" Jan 31 17:15:30 crc kubenswrapper[4769]: I0131 17:15:30.424123 4769 scope.go:117] "RemoveContainer" containerID="24a64afd4a5bbf529ad5ce09b40a6f1a313acbadc1b73a94aad6ed671994e7c0" Jan 31 17:15:30 crc kubenswrapper[4769]: E0131 17:15:30.424818 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"24a64afd4a5bbf529ad5ce09b40a6f1a313acbadc1b73a94aad6ed671994e7c0\": container with ID starting with 24a64afd4a5bbf529ad5ce09b40a6f1a313acbadc1b73a94aad6ed671994e7c0 not found: ID does not exist" containerID="24a64afd4a5bbf529ad5ce09b40a6f1a313acbadc1b73a94aad6ed671994e7c0" Jan 31 17:15:30 crc kubenswrapper[4769]: I0131 17:15:30.424842 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"24a64afd4a5bbf529ad5ce09b40a6f1a313acbadc1b73a94aad6ed671994e7c0"} err="failed to get container status \"24a64afd4a5bbf529ad5ce09b40a6f1a313acbadc1b73a94aad6ed671994e7c0\": rpc error: code = NotFound desc = could not find container \"24a64afd4a5bbf529ad5ce09b40a6f1a313acbadc1b73a94aad6ed671994e7c0\": container with ID starting with 24a64afd4a5bbf529ad5ce09b40a6f1a313acbadc1b73a94aad6ed671994e7c0 not found: ID does not exist" Jan 31 17:15:30 crc kubenswrapper[4769]: I0131 17:15:30.424857 4769 scope.go:117] "RemoveContainer" containerID="fcb47e1b95bd23ee6e8d25d16da4fcd90919c129a9f607b62b1821c8d13b8eb6" Jan 31 17:15:30 crc kubenswrapper[4769]: E0131 17:15:30.425073 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fcb47e1b95bd23ee6e8d25d16da4fcd90919c129a9f607b62b1821c8d13b8eb6\": container with ID starting with fcb47e1b95bd23ee6e8d25d16da4fcd90919c129a9f607b62b1821c8d13b8eb6 not found: ID does not exist" containerID="fcb47e1b95bd23ee6e8d25d16da4fcd90919c129a9f607b62b1821c8d13b8eb6" Jan 31 17:15:30 crc kubenswrapper[4769]: I0131 17:15:30.425094 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fcb47e1b95bd23ee6e8d25d16da4fcd90919c129a9f607b62b1821c8d13b8eb6"} err="failed to get container status \"fcb47e1b95bd23ee6e8d25d16da4fcd90919c129a9f607b62b1821c8d13b8eb6\": rpc error: code = NotFound desc = could not find container \"fcb47e1b95bd23ee6e8d25d16da4fcd90919c129a9f607b62b1821c8d13b8eb6\": container with ID starting with 
fcb47e1b95bd23ee6e8d25d16da4fcd90919c129a9f607b62b1821c8d13b8eb6 not found: ID does not exist" Jan 31 17:15:30 crc kubenswrapper[4769]: I0131 17:15:30.715942 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="94016076-ac13-4d38-8a43-6cc6fea39577" path="/var/lib/kubelet/pods/94016076-ac13-4d38-8a43-6cc6fea39577/volumes" Jan 31 17:15:30 crc kubenswrapper[4769]: I0131 17:15:30.730400 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices\") pod \"swift-ring-rebalance-2sjs2\" (UID: \"54c0116b-a027-4f11-8b6b-aa00778f1acb\") " pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 17:15:30 crc kubenswrapper[4769]: E0131 17:15:30.730587 4769 configmap.go:193] Couldn't get configMap swift-kuttl-tests/swift-ring-config-data: configmap "swift-ring-config-data" not found Jan 31 17:15:30 crc kubenswrapper[4769]: E0131 17:15:30.730654 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices podName:54c0116b-a027-4f11-8b6b-aa00778f1acb nodeName:}" failed. No retries permitted until 2026-01-31 17:17:32.730636813 +0000 UTC m=+2900.804805482 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices") pod "swift-ring-rebalance-2sjs2" (UID: "54c0116b-a027-4f11-8b6b-aa00778f1acb") : configmap "swift-ring-config-data" not found Jan 31 17:15:31 crc kubenswrapper[4769]: I0131 17:15:31.347534 4769 scope.go:117] "RemoveContainer" containerID="6c0530e866cc91260b9953bf631bfbceeff9767fb372acc5471afcaeb5dee739" Jan 31 17:15:31 crc kubenswrapper[4769]: E0131 17:15:31.347812 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:15:35 crc kubenswrapper[4769]: I0131 17:15:35.647957 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 17:15:35 crc kubenswrapper[4769]: I0131 17:15:35.709330 4769 scope.go:117] "RemoveContainer" containerID="c0adeff93092159437f56d3a16d5a487149aa20a20f6d0f1c321bb6c899e34c2" Jan 31 17:15:35 crc kubenswrapper[4769]: I0131 17:15:35.709460 4769 scope.go:117] "RemoveContainer" containerID="9dbd9a0ad20346e4c64377f2a67c6289980b41414e80aaee45dc779649f3ac86" Jan 31 17:15:35 crc kubenswrapper[4769]: I0131 17:15:35.709662 4769 scope.go:117] "RemoveContainer" containerID="b5a2131b280435817997df4440d40e8bfe82db2f050869abe88ef581ee288329" Jan 31 17:15:35 crc kubenswrapper[4769]: I0131 17:15:35.709728 4769 scope.go:117] "RemoveContainer" containerID="9e0bc3e16231f9b53fb4ea1d60f9528601c7abd064da24494465904562243b24" Jan 31 17:15:35 crc kubenswrapper[4769]: E0131 17:15:35.710186 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator 
pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:15:36 crc kubenswrapper[4769]: I0131 17:15:36.647702 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 17:15:38 crc kubenswrapper[4769]: I0131 17:15:38.648911 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 17:15:41 crc kubenswrapper[4769]: I0131 17:15:41.647337 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 17:15:41 crc kubenswrapper[4769]: I0131 17:15:41.648904 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 17:15:41 crc kubenswrapper[4769]: I0131 17:15:41.649014 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 17:15:41 crc kubenswrapper[4769]: I0131 17:15:41.649975 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="proxy-httpd" containerStatusID={"Type":"cri-o","ID":"286d46808a0237ce122ecbae5fbd04f959231e4a773112f861deab49c286012a"} pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" containerMessage="Container proxy-httpd failed liveness probe, will be restarted" Jan 31 17:15:41 crc kubenswrapper[4769]: I0131 17:15:41.650018 4769 scope.go:117] "RemoveContainer" containerID="6c0530e866cc91260b9953bf631bfbceeff9767fb372acc5471afcaeb5dee739" Jan 31 17:15:41 crc kubenswrapper[4769]: I0131 17:15:41.650058 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" containerID="cri-o://286d46808a0237ce122ecbae5fbd04f959231e4a773112f861deab49c286012a" gracePeriod=30 Jan 31 17:15:41 crc kubenswrapper[4769]: I0131 17:15:41.651067 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 
17:15:42 crc kubenswrapper[4769]: E0131 17:15:42.072769 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ring-data-devices], unattached volumes=[], failed to process volumes=[]: context deadline exceeded" pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" podUID="54c0116b-a027-4f11-8b6b-aa00778f1acb" Jan 31 17:15:42 crc kubenswrapper[4769]: I0131 17:15:42.449295 4769 generic.go:334] "Generic (PLEG): container finished" podID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerID="286d46808a0237ce122ecbae5fbd04f959231e4a773112f861deab49c286012a" exitCode=0 Jan 31 17:15:42 crc kubenswrapper[4769]: I0131 17:15:42.449387 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 17:15:42 crc kubenswrapper[4769]: I0131 17:15:42.449487 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerDied","Data":"286d46808a0237ce122ecbae5fbd04f959231e4a773112f861deab49c286012a"} Jan 31 17:15:42 crc kubenswrapper[4769]: I0131 17:15:42.449630 4769 scope.go:117] "RemoveContainer" containerID="8b255e9b67d1ed575196b0f86f7c7a9b343f002ed40dfb1d715798e0a6d44926" Jan 31 17:15:42 crc kubenswrapper[4769]: E0131 17:15:42.471599 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:15:42 crc kubenswrapper[4769]: I0131 17:15:42.712154 4769 scope.go:117] "RemoveContainer" containerID="014b73c8ef938632d42113db88d83d9c9af346c48d23501738b4fe440f72b44f" Jan 31 17:15:42 crc kubenswrapper[4769]: I0131 17:15:42.712219 4769 scope.go:117] "RemoveContainer" containerID="6cf22fe5662840a8c5cb4e5929a4fb93dc807d17766ba53f099e7b3a63f63ef8" Jan 31 17:15:42 crc kubenswrapper[4769]: I0131 17:15:42.712304 4769 scope.go:117] "RemoveContainer" containerID="02962f2b97310c8a4c458064c36a004fdb8bf734f79a6cefa9903589110e2751" Jan 31 17:15:42 crc kubenswrapper[4769]: E0131 17:15:42.712555 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:15:43 crc kubenswrapper[4769]: I0131 17:15:43.461168 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerStarted","Data":"3c8c682bf057b3f3cd818ea1598fbb8e267b962e0187e1fb9693415421d4042c"} Jan 31 17:15:43 crc kubenswrapper[4769]: I0131 17:15:43.461743 4769 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 17:15:43 crc kubenswrapper[4769]: I0131 17:15:43.462165 4769 scope.go:117] "RemoveContainer" containerID="6c0530e866cc91260b9953bf631bfbceeff9767fb372acc5471afcaeb5dee739" Jan 31 17:15:43 crc kubenswrapper[4769]: E0131 17:15:43.462627 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:15:43 crc kubenswrapper[4769]: I0131 17:15:43.709044 4769 scope.go:117] "RemoveContainer" containerID="634222e06565f9a8960db793ad16c4794ee090ac4d43ace186f3daf20b881cd9" Jan 31 17:15:43 crc kubenswrapper[4769]: I0131 17:15:43.709175 4769 scope.go:117] "RemoveContainer" containerID="0dd9545c12ba7121dbfacc14fa99efe6c52da5daa3b0c89a71e5af63f8c99eda" Jan 31 17:15:43 crc kubenswrapper[4769]: I0131 17:15:43.709220 4769 scope.go:117] "RemoveContainer" containerID="1b75bb5aaeeefb7a7abaa985fce7c388ff1391793e0add450800b7d3c4c5b861" Jan 31 17:15:43 crc kubenswrapper[4769]: I0131 17:15:43.709402 4769 scope.go:117] "RemoveContainer" containerID="70253717258a7c161800c42af8c4f4bd84e9abe68bfbd78aad2db3dae3a77cba" Jan 31 17:15:43 crc kubenswrapper[4769]: I0131 17:15:43.709418 4769 scope.go:117] "RemoveContainer" containerID="24a94dae6f7672de022836fe383e9a1df16a07423b4ffed3c01db43a4d952d98" Jan 31 17:15:43 crc kubenswrapper[4769]: E0131 17:15:43.877241 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 20s restarting failed container=container-updater pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:15:44 crc kubenswrapper[4769]: I0131 17:15:44.484746 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerStarted","Data":"62b924cb2aedb0674029c40a405b05ea5a55bce2923841575330407035bc1267"} Jan 31 17:15:44 crc kubenswrapper[4769]: I0131 17:15:44.485535 4769 scope.go:117] "RemoveContainer" containerID="6c0530e866cc91260b9953bf631bfbceeff9767fb372acc5471afcaeb5dee739" Jan 31 17:15:44 crc kubenswrapper[4769]: E0131 17:15:44.485898 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" 
pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:15:44 crc kubenswrapper[4769]: I0131 17:15:44.487052 4769 scope.go:117] "RemoveContainer" containerID="634222e06565f9a8960db793ad16c4794ee090ac4d43ace186f3daf20b881cd9" Jan 31 17:15:44 crc kubenswrapper[4769]: I0131 17:15:44.487338 4769 scope.go:117] "RemoveContainer" containerID="0dd9545c12ba7121dbfacc14fa99efe6c52da5daa3b0c89a71e5af63f8c99eda" Jan 31 17:15:44 crc kubenswrapper[4769]: I0131 17:15:44.487550 4769 scope.go:117] "RemoveContainer" containerID="1b75bb5aaeeefb7a7abaa985fce7c388ff1391793e0add450800b7d3c4c5b861" Jan 31 17:15:44 crc kubenswrapper[4769]: I0131 17:15:44.487835 4769 scope.go:117] "RemoveContainer" containerID="24a94dae6f7672de022836fe383e9a1df16a07423b4ffed3c01db43a4d952d98" Jan 31 17:15:44 crc kubenswrapper[4769]: E0131 17:15:44.490130 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 20s restarting failed container=container-updater pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:15:47 crc kubenswrapper[4769]: I0131 17:15:47.649600 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 17:15:49 crc kubenswrapper[4769]: I0131 17:15:49.708393 4769 scope.go:117] "RemoveContainer" containerID="c0adeff93092159437f56d3a16d5a487149aa20a20f6d0f1c321bb6c899e34c2" Jan 31 17:15:49 crc kubenswrapper[4769]: I0131 17:15:49.708801 4769 scope.go:117] "RemoveContainer" containerID="9dbd9a0ad20346e4c64377f2a67c6289980b41414e80aaee45dc779649f3ac86" Jan 31 17:15:49 crc kubenswrapper[4769]: I0131 17:15:49.708915 4769 scope.go:117] "RemoveContainer" containerID="b5a2131b280435817997df4440d40e8bfe82db2f050869abe88ef581ee288329" Jan 31 17:15:49 crc kubenswrapper[4769]: I0131 17:15:49.708984 4769 scope.go:117] "RemoveContainer" containerID="9e0bc3e16231f9b53fb4ea1d60f9528601c7abd064da24494465904562243b24" Jan 31 17:15:49 crc kubenswrapper[4769]: E0131 17:15:49.709328 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for 
\"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:15:50 crc kubenswrapper[4769]: I0131 17:15:50.647629 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 17:15:50 crc kubenswrapper[4769]: I0131 17:15:50.682704 4769 patch_prober.go:28] interesting pod/machine-config-daemon-4bqbm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 31 17:15:50 crc kubenswrapper[4769]: I0131 17:15:50.682793 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 31 17:15:50 crc kubenswrapper[4769]: I0131 17:15:50.682855 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" Jan 31 17:15:50 crc kubenswrapper[4769]: I0131 17:15:50.683664 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"6e828f88c834e15dfb7cfff730fff4effd93e0ba9e72273ac0004887e05469f3"} pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 31 17:15:50 crc kubenswrapper[4769]: I0131 17:15:50.683768 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" containerID="cri-o://6e828f88c834e15dfb7cfff730fff4effd93e0ba9e72273ac0004887e05469f3" gracePeriod=600 Jan 31 17:15:51 crc kubenswrapper[4769]: I0131 17:15:51.545663 4769 generic.go:334] "Generic (PLEG): container finished" podID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerID="6e828f88c834e15dfb7cfff730fff4effd93e0ba9e72273ac0004887e05469f3" exitCode=0 Jan 31 17:15:51 crc kubenswrapper[4769]: I0131 17:15:51.545716 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" event={"ID":"1d352f75-43f7-4b8c-867e-cfb17bbbe011","Type":"ContainerDied","Data":"6e828f88c834e15dfb7cfff730fff4effd93e0ba9e72273ac0004887e05469f3"} Jan 31 17:15:51 crc kubenswrapper[4769]: I0131 17:15:51.545756 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" event={"ID":"1d352f75-43f7-4b8c-867e-cfb17bbbe011","Type":"ContainerStarted","Data":"4458fcd40801fe3787795388074fefeaa434873f4716c2298183466301cf956f"} Jan 31 17:15:51 crc kubenswrapper[4769]: 
I0131 17:15:51.545777 4769 scope.go:117] "RemoveContainer" containerID="fb744d21157f411015c8cd1651de19adb19d19b1ce3580e5574de4b8b82236f1" Jan 31 17:15:51 crc kubenswrapper[4769]: I0131 17:15:51.647750 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 17:15:53 crc kubenswrapper[4769]: I0131 17:15:53.647134 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 17:15:53 crc kubenswrapper[4769]: I0131 17:15:53.647693 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 17:15:53 crc kubenswrapper[4769]: I0131 17:15:53.648803 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="proxy-httpd" containerStatusID={"Type":"cri-o","ID":"3c8c682bf057b3f3cd818ea1598fbb8e267b962e0187e1fb9693415421d4042c"} pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" containerMessage="Container proxy-httpd failed liveness probe, will be restarted" Jan 31 17:15:53 crc kubenswrapper[4769]: I0131 17:15:53.648860 4769 scope.go:117] "RemoveContainer" containerID="6c0530e866cc91260b9953bf631bfbceeff9767fb372acc5471afcaeb5dee739" Jan 31 17:15:53 crc kubenswrapper[4769]: I0131 17:15:53.648919 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" containerID="cri-o://3c8c682bf057b3f3cd818ea1598fbb8e267b962e0187e1fb9693415421d4042c" gracePeriod=30 Jan 31 17:15:53 crc kubenswrapper[4769]: I0131 17:15:53.649610 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 17:15:53 crc kubenswrapper[4769]: E0131 17:15:53.767204 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:15:54 crc kubenswrapper[4769]: I0131 17:15:54.569863 4769 generic.go:334] "Generic (PLEG): container finished" podID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerID="3c8c682bf057b3f3cd818ea1598fbb8e267b962e0187e1fb9693415421d4042c" exitCode=0 Jan 31 17:15:54 crc kubenswrapper[4769]: I0131 17:15:54.569901 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerDied","Data":"3c8c682bf057b3f3cd818ea1598fbb8e267b962e0187e1fb9693415421d4042c"} Jan 31 17:15:54 crc kubenswrapper[4769]: I0131 17:15:54.569931 4769 scope.go:117] 
"RemoveContainer" containerID="286d46808a0237ce122ecbae5fbd04f959231e4a773112f861deab49c286012a" Jan 31 17:15:54 crc kubenswrapper[4769]: I0131 17:15:54.570474 4769 scope.go:117] "RemoveContainer" containerID="3c8c682bf057b3f3cd818ea1598fbb8e267b962e0187e1fb9693415421d4042c" Jan 31 17:15:54 crc kubenswrapper[4769]: I0131 17:15:54.570539 4769 scope.go:117] "RemoveContainer" containerID="6c0530e866cc91260b9953bf631bfbceeff9767fb372acc5471afcaeb5dee739" Jan 31 17:15:54 crc kubenswrapper[4769]: E0131 17:15:54.570791 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:15:56 crc kubenswrapper[4769]: I0131 17:15:56.708305 4769 scope.go:117] "RemoveContainer" containerID="014b73c8ef938632d42113db88d83d9c9af346c48d23501738b4fe440f72b44f" Jan 31 17:15:56 crc kubenswrapper[4769]: I0131 17:15:56.708686 4769 scope.go:117] "RemoveContainer" containerID="6cf22fe5662840a8c5cb4e5929a4fb93dc807d17766ba53f099e7b3a63f63ef8" Jan 31 17:15:56 crc kubenswrapper[4769]: I0131 17:15:56.708777 4769 scope.go:117] "RemoveContainer" containerID="02962f2b97310c8a4c458064c36a004fdb8bf734f79a6cefa9903589110e2751" Jan 31 17:15:56 crc kubenswrapper[4769]: E0131 17:15:56.709103 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:15:59 crc kubenswrapper[4769]: I0131 17:15:59.708804 4769 scope.go:117] "RemoveContainer" containerID="634222e06565f9a8960db793ad16c4794ee090ac4d43ace186f3daf20b881cd9" Jan 31 17:15:59 crc kubenswrapper[4769]: I0131 17:15:59.709358 4769 scope.go:117] "RemoveContainer" containerID="0dd9545c12ba7121dbfacc14fa99efe6c52da5daa3b0c89a71e5af63f8c99eda" Jan 31 17:15:59 crc kubenswrapper[4769]: I0131 17:15:59.709411 4769 scope.go:117] "RemoveContainer" containerID="1b75bb5aaeeefb7a7abaa985fce7c388ff1391793e0add450800b7d3c4c5b861" Jan 31 17:15:59 crc kubenswrapper[4769]: I0131 17:15:59.709598 4769 scope.go:117] "RemoveContainer" containerID="24a94dae6f7672de022836fe383e9a1df16a07423b4ffed3c01db43a4d952d98" Jan 31 17:15:59 crc kubenswrapper[4769]: E0131 17:15:59.881899 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator 
pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:16:00 crc kubenswrapper[4769]: I0131 17:16:00.634422 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerStarted","Data":"6edbc4cca89431b559a2d473c7779b547c8c943dbb854d38e4273a9fd27c649f"} Jan 31 17:16:00 crc kubenswrapper[4769]: I0131 17:16:00.635324 4769 scope.go:117] "RemoveContainer" containerID="634222e06565f9a8960db793ad16c4794ee090ac4d43ace186f3daf20b881cd9" Jan 31 17:16:00 crc kubenswrapper[4769]: I0131 17:16:00.635386 4769 scope.go:117] "RemoveContainer" containerID="0dd9545c12ba7121dbfacc14fa99efe6c52da5daa3b0c89a71e5af63f8c99eda" Jan 31 17:16:00 crc kubenswrapper[4769]: I0131 17:16:00.635470 4769 scope.go:117] "RemoveContainer" containerID="24a94dae6f7672de022836fe383e9a1df16a07423b4ffed3c01db43a4d952d98" Jan 31 17:16:00 crc kubenswrapper[4769]: E0131 17:16:00.635800 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:16:03 crc kubenswrapper[4769]: I0131 17:16:03.710025 4769 scope.go:117] "RemoveContainer" containerID="c0adeff93092159437f56d3a16d5a487149aa20a20f6d0f1c321bb6c899e34c2" Jan 31 17:16:03 crc kubenswrapper[4769]: I0131 17:16:03.710687 4769 scope.go:117] "RemoveContainer" containerID="9dbd9a0ad20346e4c64377f2a67c6289980b41414e80aaee45dc779649f3ac86" Jan 31 17:16:03 crc kubenswrapper[4769]: I0131 17:16:03.710974 4769 scope.go:117] "RemoveContainer" containerID="b5a2131b280435817997df4440d40e8bfe82db2f050869abe88ef581ee288329" Jan 31 17:16:03 crc kubenswrapper[4769]: I0131 17:16:03.711110 4769 scope.go:117] "RemoveContainer" containerID="9e0bc3e16231f9b53fb4ea1d60f9528601c7abd064da24494465904562243b24" Jan 31 17:16:03 crc kubenswrapper[4769]: E0131 17:16:03.711995 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator 
pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:16:05 crc kubenswrapper[4769]: I0131 17:16:05.708977 4769 scope.go:117] "RemoveContainer" containerID="3c8c682bf057b3f3cd818ea1598fbb8e267b962e0187e1fb9693415421d4042c" Jan 31 17:16:05 crc kubenswrapper[4769]: I0131 17:16:05.709782 4769 scope.go:117] "RemoveContainer" containerID="6c0530e866cc91260b9953bf631bfbceeff9767fb372acc5471afcaeb5dee739" Jan 31 17:16:05 crc kubenswrapper[4769]: E0131 17:16:05.710398 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:16:09 crc kubenswrapper[4769]: I0131 17:16:09.709082 4769 scope.go:117] "RemoveContainer" containerID="014b73c8ef938632d42113db88d83d9c9af346c48d23501738b4fe440f72b44f" Jan 31 17:16:09 crc kubenswrapper[4769]: I0131 17:16:09.709858 4769 scope.go:117] "RemoveContainer" containerID="6cf22fe5662840a8c5cb4e5929a4fb93dc807d17766ba53f099e7b3a63f63ef8" Jan 31 17:16:09 crc kubenswrapper[4769]: I0131 17:16:09.710073 4769 scope.go:117] "RemoveContainer" containerID="02962f2b97310c8a4c458064c36a004fdb8bf734f79a6cefa9903589110e2751" Jan 31 17:16:09 crc kubenswrapper[4769]: E0131 17:16:09.710789 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:16:15 crc kubenswrapper[4769]: I0131 17:16:15.708643 4769 scope.go:117] "RemoveContainer" containerID="634222e06565f9a8960db793ad16c4794ee090ac4d43ace186f3daf20b881cd9" Jan 31 17:16:15 crc kubenswrapper[4769]: I0131 17:16:15.709337 4769 scope.go:117] "RemoveContainer" containerID="0dd9545c12ba7121dbfacc14fa99efe6c52da5daa3b0c89a71e5af63f8c99eda" Jan 31 17:16:15 crc kubenswrapper[4769]: I0131 17:16:15.709545 4769 scope.go:117] "RemoveContainer" 
containerID="24a94dae6f7672de022836fe383e9a1df16a07423b4ffed3c01db43a4d952d98" Jan 31 17:16:15 crc kubenswrapper[4769]: E0131 17:16:15.710044 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:16:16 crc kubenswrapper[4769]: I0131 17:16:16.708979 4769 scope.go:117] "RemoveContainer" containerID="c0adeff93092159437f56d3a16d5a487149aa20a20f6d0f1c321bb6c899e34c2" Jan 31 17:16:16 crc kubenswrapper[4769]: I0131 17:16:16.709090 4769 scope.go:117] "RemoveContainer" containerID="9dbd9a0ad20346e4c64377f2a67c6289980b41414e80aaee45dc779649f3ac86" Jan 31 17:16:16 crc kubenswrapper[4769]: I0131 17:16:16.709228 4769 scope.go:117] "RemoveContainer" containerID="b5a2131b280435817997df4440d40e8bfe82db2f050869abe88ef581ee288329" Jan 31 17:16:16 crc kubenswrapper[4769]: I0131 17:16:16.709284 4769 scope.go:117] "RemoveContainer" containerID="9e0bc3e16231f9b53fb4ea1d60f9528601c7abd064da24494465904562243b24" Jan 31 17:16:16 crc kubenswrapper[4769]: E0131 17:16:16.709676 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:16:18 crc kubenswrapper[4769]: I0131 17:16:18.708616 4769 scope.go:117] "RemoveContainer" containerID="3c8c682bf057b3f3cd818ea1598fbb8e267b962e0187e1fb9693415421d4042c" Jan 31 17:16:18 crc kubenswrapper[4769]: I0131 17:16:18.708974 4769 scope.go:117] "RemoveContainer" containerID="6c0530e866cc91260b9953bf631bfbceeff9767fb372acc5471afcaeb5dee739" Jan 31 17:16:18 crc kubenswrapper[4769]: E0131 17:16:18.709383 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s 
restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:16:18 crc kubenswrapper[4769]: I0131 17:16:18.800898 4769 generic.go:334] "Generic (PLEG): container finished" podID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" containerID="b0862079610db9d5f62f67aa455181facb401ffa066642d56a42e08534f14464" exitCode=1 Jan 31 17:16:18 crc kubenswrapper[4769]: I0131 17:16:18.800959 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerDied","Data":"b0862079610db9d5f62f67aa455181facb401ffa066642d56a42e08534f14464"} Jan 31 17:16:18 crc kubenswrapper[4769]: I0131 17:16:18.801597 4769 scope.go:117] "RemoveContainer" containerID="b05cfd3993f9609da7c0b1896e627e3b448ea96c8859fbfbb4e6331cd4667933" Jan 31 17:16:18 crc kubenswrapper[4769]: I0131 17:16:18.802244 4769 scope.go:117] "RemoveContainer" containerID="014b73c8ef938632d42113db88d83d9c9af346c48d23501738b4fe440f72b44f" Jan 31 17:16:18 crc kubenswrapper[4769]: I0131 17:16:18.802311 4769 scope.go:117] "RemoveContainer" containerID="6cf22fe5662840a8c5cb4e5929a4fb93dc807d17766ba53f099e7b3a63f63ef8" Jan 31 17:16:18 crc kubenswrapper[4769]: I0131 17:16:18.802336 4769 scope.go:117] "RemoveContainer" containerID="b0862079610db9d5f62f67aa455181facb401ffa066642d56a42e08534f14464" Jan 31 17:16:18 crc kubenswrapper[4769]: I0131 17:16:18.802408 4769 scope.go:117] "RemoveContainer" containerID="02962f2b97310c8a4c458064c36a004fdb8bf734f79a6cefa9903589110e2751" Jan 31 17:16:18 crc kubenswrapper[4769]: E0131 17:16:18.802721 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 40s restarting failed container=container-updater pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:16:26 crc kubenswrapper[4769]: I0131 17:16:26.710273 4769 scope.go:117] "RemoveContainer" containerID="634222e06565f9a8960db793ad16c4794ee090ac4d43ace186f3daf20b881cd9" Jan 31 17:16:26 crc kubenswrapper[4769]: I0131 17:16:26.710976 4769 scope.go:117] "RemoveContainer" containerID="0dd9545c12ba7121dbfacc14fa99efe6c52da5daa3b0c89a71e5af63f8c99eda" Jan 31 17:16:26 crc kubenswrapper[4769]: I0131 17:16:26.711160 4769 scope.go:117] "RemoveContainer" containerID="24a94dae6f7672de022836fe383e9a1df16a07423b4ffed3c01db43a4d952d98" Jan 31 17:16:26 crc kubenswrapper[4769]: E0131 17:16:26.711866 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:16:29 crc kubenswrapper[4769]: I0131 17:16:29.709120 4769 scope.go:117] "RemoveContainer" containerID="014b73c8ef938632d42113db88d83d9c9af346c48d23501738b4fe440f72b44f" Jan 31 17:16:29 crc kubenswrapper[4769]: I0131 17:16:29.709201 4769 scope.go:117] "RemoveContainer" containerID="6cf22fe5662840a8c5cb4e5929a4fb93dc807d17766ba53f099e7b3a63f63ef8" Jan 31 17:16:29 crc kubenswrapper[4769]: I0131 17:16:29.709222 4769 scope.go:117] "RemoveContainer" containerID="b0862079610db9d5f62f67aa455181facb401ffa066642d56a42e08534f14464" Jan 31 17:16:29 crc kubenswrapper[4769]: I0131 17:16:29.709279 4769 scope.go:117] "RemoveContainer" containerID="02962f2b97310c8a4c458064c36a004fdb8bf734f79a6cefa9903589110e2751" Jan 31 17:16:29 crc kubenswrapper[4769]: E0131 17:16:29.709601 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 40s restarting failed container=container-updater pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:16:29 crc kubenswrapper[4769]: I0131 17:16:29.709764 4769 scope.go:117] "RemoveContainer" containerID="c0adeff93092159437f56d3a16d5a487149aa20a20f6d0f1c321bb6c899e34c2" Jan 31 17:16:29 crc kubenswrapper[4769]: I0131 17:16:29.709857 4769 scope.go:117] "RemoveContainer" containerID="9dbd9a0ad20346e4c64377f2a67c6289980b41414e80aaee45dc779649f3ac86" Jan 31 17:16:29 crc kubenswrapper[4769]: I0131 17:16:29.709977 4769 scope.go:117] "RemoveContainer" containerID="b5a2131b280435817997df4440d40e8bfe82db2f050869abe88ef581ee288329" Jan 31 17:16:29 crc kubenswrapper[4769]: I0131 17:16:29.710024 4769 scope.go:117] "RemoveContainer" containerID="9e0bc3e16231f9b53fb4ea1d60f9528601c7abd064da24494465904562243b24" Jan 31 17:16:29 crc kubenswrapper[4769]: E0131 17:16:29.710361 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for 
\"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:16:33 crc kubenswrapper[4769]: I0131 17:16:33.708932 4769 scope.go:117] "RemoveContainer" containerID="3c8c682bf057b3f3cd818ea1598fbb8e267b962e0187e1fb9693415421d4042c" Jan 31 17:16:33 crc kubenswrapper[4769]: I0131 17:16:33.709380 4769 scope.go:117] "RemoveContainer" containerID="6c0530e866cc91260b9953bf631bfbceeff9767fb372acc5471afcaeb5dee739" Jan 31 17:16:33 crc kubenswrapper[4769]: E0131 17:16:33.709935 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:16:41 crc kubenswrapper[4769]: I0131 17:16:41.709184 4769 scope.go:117] "RemoveContainer" containerID="634222e06565f9a8960db793ad16c4794ee090ac4d43ace186f3daf20b881cd9" Jan 31 17:16:41 crc kubenswrapper[4769]: I0131 17:16:41.709747 4769 scope.go:117] "RemoveContainer" containerID="0dd9545c12ba7121dbfacc14fa99efe6c52da5daa3b0c89a71e5af63f8c99eda" Jan 31 17:16:41 crc kubenswrapper[4769]: I0131 17:16:41.709944 4769 scope.go:117] "RemoveContainer" containerID="24a94dae6f7672de022836fe383e9a1df16a07423b4ffed3c01db43a4d952d98" Jan 31 17:16:41 crc kubenswrapper[4769]: E0131 17:16:41.710657 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:16:43 crc kubenswrapper[4769]: I0131 17:16:43.709638 4769 scope.go:117] "RemoveContainer" containerID="c0adeff93092159437f56d3a16d5a487149aa20a20f6d0f1c321bb6c899e34c2" Jan 31 17:16:43 crc kubenswrapper[4769]: I0131 17:16:43.709935 4769 scope.go:117] "RemoveContainer" containerID="9dbd9a0ad20346e4c64377f2a67c6289980b41414e80aaee45dc779649f3ac86" Jan 31 17:16:43 crc kubenswrapper[4769]: I0131 
17:16:43.710052 4769 scope.go:117] "RemoveContainer" containerID="b5a2131b280435817997df4440d40e8bfe82db2f050869abe88ef581ee288329" Jan 31 17:16:43 crc kubenswrapper[4769]: I0131 17:16:43.710097 4769 scope.go:117] "RemoveContainer" containerID="9e0bc3e16231f9b53fb4ea1d60f9528601c7abd064da24494465904562243b24" Jan 31 17:16:43 crc kubenswrapper[4769]: E0131 17:16:43.710382 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:16:44 crc kubenswrapper[4769]: I0131 17:16:44.709894 4769 scope.go:117] "RemoveContainer" containerID="014b73c8ef938632d42113db88d83d9c9af346c48d23501738b4fe440f72b44f" Jan 31 17:16:44 crc kubenswrapper[4769]: I0131 17:16:44.710025 4769 scope.go:117] "RemoveContainer" containerID="6cf22fe5662840a8c5cb4e5929a4fb93dc807d17766ba53f099e7b3a63f63ef8" Jan 31 17:16:44 crc kubenswrapper[4769]: I0131 17:16:44.710071 4769 scope.go:117] "RemoveContainer" containerID="b0862079610db9d5f62f67aa455181facb401ffa066642d56a42e08534f14464" Jan 31 17:16:44 crc kubenswrapper[4769]: I0131 17:16:44.710190 4769 scope.go:117] "RemoveContainer" containerID="02962f2b97310c8a4c458064c36a004fdb8bf734f79a6cefa9903589110e2751" Jan 31 17:16:44 crc kubenswrapper[4769]: E0131 17:16:44.710936 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 40s restarting failed container=container-updater pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:16:47 crc kubenswrapper[4769]: I0131 17:16:47.708928 4769 scope.go:117] "RemoveContainer" containerID="3c8c682bf057b3f3cd818ea1598fbb8e267b962e0187e1fb9693415421d4042c" Jan 31 17:16:47 crc kubenswrapper[4769]: I0131 17:16:47.709450 4769 scope.go:117] "RemoveContainer" 
containerID="6c0530e866cc91260b9953bf631bfbceeff9767fb372acc5471afcaeb5dee739" Jan 31 17:16:47 crc kubenswrapper[4769]: E0131 17:16:47.709952 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:16:56 crc kubenswrapper[4769]: I0131 17:16:56.709024 4769 scope.go:117] "RemoveContainer" containerID="634222e06565f9a8960db793ad16c4794ee090ac4d43ace186f3daf20b881cd9" Jan 31 17:16:56 crc kubenswrapper[4769]: I0131 17:16:56.709847 4769 scope.go:117] "RemoveContainer" containerID="0dd9545c12ba7121dbfacc14fa99efe6c52da5daa3b0c89a71e5af63f8c99eda" Jan 31 17:16:56 crc kubenswrapper[4769]: I0131 17:16:56.710028 4769 scope.go:117] "RemoveContainer" containerID="24a94dae6f7672de022836fe383e9a1df16a07423b4ffed3c01db43a4d952d98" Jan 31 17:16:56 crc kubenswrapper[4769]: E0131 17:16:56.710525 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:16:57 crc kubenswrapper[4769]: I0131 17:16:57.709428 4769 scope.go:117] "RemoveContainer" containerID="014b73c8ef938632d42113db88d83d9c9af346c48d23501738b4fe440f72b44f" Jan 31 17:16:57 crc kubenswrapper[4769]: I0131 17:16:57.709548 4769 scope.go:117] "RemoveContainer" containerID="6cf22fe5662840a8c5cb4e5929a4fb93dc807d17766ba53f099e7b3a63f63ef8" Jan 31 17:16:57 crc kubenswrapper[4769]: I0131 17:16:57.709579 4769 scope.go:117] "RemoveContainer" containerID="b0862079610db9d5f62f67aa455181facb401ffa066642d56a42e08534f14464" Jan 31 17:16:57 crc kubenswrapper[4769]: I0131 17:16:57.709661 4769 scope.go:117] "RemoveContainer" containerID="02962f2b97310c8a4c458064c36a004fdb8bf734f79a6cefa9903589110e2751" Jan 31 17:16:57 crc kubenswrapper[4769]: E0131 17:16:57.710025 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 40s restarting failed 
container=container-updater pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:16:58 crc kubenswrapper[4769]: I0131 17:16:58.708358 4769 scope.go:117] "RemoveContainer" containerID="c0adeff93092159437f56d3a16d5a487149aa20a20f6d0f1c321bb6c899e34c2" Jan 31 17:16:58 crc kubenswrapper[4769]: I0131 17:16:58.708807 4769 scope.go:117] "RemoveContainer" containerID="9dbd9a0ad20346e4c64377f2a67c6289980b41414e80aaee45dc779649f3ac86" Jan 31 17:16:58 crc kubenswrapper[4769]: I0131 17:16:58.708988 4769 scope.go:117] "RemoveContainer" containerID="b5a2131b280435817997df4440d40e8bfe82db2f050869abe88ef581ee288329" Jan 31 17:16:58 crc kubenswrapper[4769]: I0131 17:16:58.709053 4769 scope.go:117] "RemoveContainer" containerID="9e0bc3e16231f9b53fb4ea1d60f9528601c7abd064da24494465904562243b24" Jan 31 17:16:58 crc kubenswrapper[4769]: E0131 17:16:58.709476 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:17:02 crc kubenswrapper[4769]: I0131 17:17:02.719917 4769 scope.go:117] "RemoveContainer" containerID="3c8c682bf057b3f3cd818ea1598fbb8e267b962e0187e1fb9693415421d4042c" Jan 31 17:17:02 crc kubenswrapper[4769]: I0131 17:17:02.720310 4769 scope.go:117] "RemoveContainer" containerID="6c0530e866cc91260b9953bf631bfbceeff9767fb372acc5471afcaeb5dee739" Jan 31 17:17:02 crc kubenswrapper[4769]: E0131 17:17:02.720876 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:17:07 crc kubenswrapper[4769]: I0131 17:17:07.708738 4769 scope.go:117] "RemoveContainer" containerID="634222e06565f9a8960db793ad16c4794ee090ac4d43ace186f3daf20b881cd9" Jan 31 17:17:07 crc kubenswrapper[4769]: I0131 17:17:07.709198 4769 scope.go:117] "RemoveContainer" 
containerID="0dd9545c12ba7121dbfacc14fa99efe6c52da5daa3b0c89a71e5af63f8c99eda" Jan 31 17:17:07 crc kubenswrapper[4769]: I0131 17:17:07.709429 4769 scope.go:117] "RemoveContainer" containerID="24a94dae6f7672de022836fe383e9a1df16a07423b4ffed3c01db43a4d952d98" Jan 31 17:17:07 crc kubenswrapper[4769]: E0131 17:17:07.710014 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:17:09 crc kubenswrapper[4769]: I0131 17:17:09.708787 4769 scope.go:117] "RemoveContainer" containerID="014b73c8ef938632d42113db88d83d9c9af346c48d23501738b4fe440f72b44f" Jan 31 17:17:09 crc kubenswrapper[4769]: I0131 17:17:09.708886 4769 scope.go:117] "RemoveContainer" containerID="6cf22fe5662840a8c5cb4e5929a4fb93dc807d17766ba53f099e7b3a63f63ef8" Jan 31 17:17:09 crc kubenswrapper[4769]: I0131 17:17:09.708917 4769 scope.go:117] "RemoveContainer" containerID="b0862079610db9d5f62f67aa455181facb401ffa066642d56a42e08534f14464" Jan 31 17:17:09 crc kubenswrapper[4769]: I0131 17:17:09.709011 4769 scope.go:117] "RemoveContainer" containerID="02962f2b97310c8a4c458064c36a004fdb8bf734f79a6cefa9903589110e2751" Jan 31 17:17:09 crc kubenswrapper[4769]: E0131 17:17:09.904776 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:17:10 crc kubenswrapper[4769]: I0131 17:17:10.341477 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerStarted","Data":"921f9ad2d900230c5d58ee57d922edeeccea40bfc73f3cb1ac1a70b295b43a54"} Jan 31 17:17:10 crc kubenswrapper[4769]: I0131 17:17:10.342153 4769 scope.go:117] "RemoveContainer" containerID="014b73c8ef938632d42113db88d83d9c9af346c48d23501738b4fe440f72b44f" Jan 31 17:17:10 crc kubenswrapper[4769]: I0131 17:17:10.342213 4769 scope.go:117] "RemoveContainer" containerID="6cf22fe5662840a8c5cb4e5929a4fb93dc807d17766ba53f099e7b3a63f63ef8" Jan 31 17:17:10 crc kubenswrapper[4769]: I0131 17:17:10.342297 4769 scope.go:117] "RemoveContainer" 
containerID="02962f2b97310c8a4c458064c36a004fdb8bf734f79a6cefa9903589110e2751" Jan 31 17:17:10 crc kubenswrapper[4769]: E0131 17:17:10.342558 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:17:11 crc kubenswrapper[4769]: I0131 17:17:11.711848 4769 scope.go:117] "RemoveContainer" containerID="c0adeff93092159437f56d3a16d5a487149aa20a20f6d0f1c321bb6c899e34c2" Jan 31 17:17:11 crc kubenswrapper[4769]: I0131 17:17:11.712655 4769 scope.go:117] "RemoveContainer" containerID="9dbd9a0ad20346e4c64377f2a67c6289980b41414e80aaee45dc779649f3ac86" Jan 31 17:17:11 crc kubenswrapper[4769]: I0131 17:17:11.713199 4769 scope.go:117] "RemoveContainer" containerID="b5a2131b280435817997df4440d40e8bfe82db2f050869abe88ef581ee288329" Jan 31 17:17:11 crc kubenswrapper[4769]: I0131 17:17:11.713407 4769 scope.go:117] "RemoveContainer" containerID="9e0bc3e16231f9b53fb4ea1d60f9528601c7abd064da24494465904562243b24" Jan 31 17:17:11 crc kubenswrapper[4769]: E0131 17:17:11.714666 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:17:13 crc kubenswrapper[4769]: I0131 17:17:13.709240 4769 scope.go:117] "RemoveContainer" containerID="3c8c682bf057b3f3cd818ea1598fbb8e267b962e0187e1fb9693415421d4042c" Jan 31 17:17:13 crc kubenswrapper[4769]: I0131 17:17:13.709304 4769 scope.go:117] "RemoveContainer" containerID="6c0530e866cc91260b9953bf631bfbceeff9767fb372acc5471afcaeb5dee739" Jan 31 17:17:13 crc kubenswrapper[4769]: E0131 17:17:13.709926 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s 
restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:17:20 crc kubenswrapper[4769]: I0131 17:17:20.702777 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-fk6lp/must-gather-9kcd6"] Jan 31 17:17:20 crc kubenswrapper[4769]: E0131 17:17:20.703649 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94016076-ac13-4d38-8a43-6cc6fea39577" containerName="extract-content" Jan 31 17:17:20 crc kubenswrapper[4769]: I0131 17:17:20.703664 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="94016076-ac13-4d38-8a43-6cc6fea39577" containerName="extract-content" Jan 31 17:17:20 crc kubenswrapper[4769]: E0131 17:17:20.703693 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94016076-ac13-4d38-8a43-6cc6fea39577" containerName="registry-server" Jan 31 17:17:20 crc kubenswrapper[4769]: I0131 17:17:20.703701 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="94016076-ac13-4d38-8a43-6cc6fea39577" containerName="registry-server" Jan 31 17:17:20 crc kubenswrapper[4769]: E0131 17:17:20.703723 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94016076-ac13-4d38-8a43-6cc6fea39577" containerName="extract-utilities" Jan 31 17:17:20 crc kubenswrapper[4769]: I0131 17:17:20.703732 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="94016076-ac13-4d38-8a43-6cc6fea39577" containerName="extract-utilities" Jan 31 17:17:20 crc kubenswrapper[4769]: I0131 17:17:20.703931 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="94016076-ac13-4d38-8a43-6cc6fea39577" containerName="registry-server" Jan 31 17:17:20 crc kubenswrapper[4769]: I0131 17:17:20.704944 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-fk6lp/must-gather-9kcd6" Jan 31 17:17:20 crc kubenswrapper[4769]: I0131 17:17:20.725379 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-fk6lp/must-gather-9kcd6"] Jan 31 17:17:20 crc kubenswrapper[4769]: I0131 17:17:20.725870 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-fk6lp"/"openshift-service-ca.crt" Jan 31 17:17:20 crc kubenswrapper[4769]: I0131 17:17:20.726102 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-fk6lp"/"kube-root-ca.crt" Jan 31 17:17:20 crc kubenswrapper[4769]: I0131 17:17:20.783128 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/29cb450c-f082-4909-977f-840f5f050086-must-gather-output\") pod \"must-gather-9kcd6\" (UID: \"29cb450c-f082-4909-977f-840f5f050086\") " pod="openshift-must-gather-fk6lp/must-gather-9kcd6" Jan 31 17:17:20 crc kubenswrapper[4769]: I0131 17:17:20.783194 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dqgjs\" (UniqueName: \"kubernetes.io/projected/29cb450c-f082-4909-977f-840f5f050086-kube-api-access-dqgjs\") pod \"must-gather-9kcd6\" (UID: \"29cb450c-f082-4909-977f-840f5f050086\") " pod="openshift-must-gather-fk6lp/must-gather-9kcd6" Jan 31 17:17:20 crc kubenswrapper[4769]: I0131 17:17:20.884215 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/29cb450c-f082-4909-977f-840f5f050086-must-gather-output\") pod \"must-gather-9kcd6\" (UID: \"29cb450c-f082-4909-977f-840f5f050086\") " pod="openshift-must-gather-fk6lp/must-gather-9kcd6" Jan 31 17:17:20 crc kubenswrapper[4769]: I0131 17:17:20.884267 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dqgjs\" (UniqueName: \"kubernetes.io/projected/29cb450c-f082-4909-977f-840f5f050086-kube-api-access-dqgjs\") pod \"must-gather-9kcd6\" (UID: \"29cb450c-f082-4909-977f-840f5f050086\") " pod="openshift-must-gather-fk6lp/must-gather-9kcd6" Jan 31 17:17:20 crc kubenswrapper[4769]: I0131 17:17:20.884712 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/29cb450c-f082-4909-977f-840f5f050086-must-gather-output\") pod \"must-gather-9kcd6\" (UID: \"29cb450c-f082-4909-977f-840f5f050086\") " pod="openshift-must-gather-fk6lp/must-gather-9kcd6" Jan 31 17:17:20 crc kubenswrapper[4769]: I0131 17:17:20.910092 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dqgjs\" (UniqueName: \"kubernetes.io/projected/29cb450c-f082-4909-977f-840f5f050086-kube-api-access-dqgjs\") pod \"must-gather-9kcd6\" (UID: \"29cb450c-f082-4909-977f-840f5f050086\") " pod="openshift-must-gather-fk6lp/must-gather-9kcd6" Jan 31 17:17:21 crc kubenswrapper[4769]: I0131 17:17:21.034936 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-fk6lp/must-gather-9kcd6" Jan 31 17:17:21 crc kubenswrapper[4769]: I0131 17:17:21.436246 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-fk6lp/must-gather-9kcd6"] Jan 31 17:17:22 crc kubenswrapper[4769]: I0131 17:17:22.453958 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-fk6lp/must-gather-9kcd6" event={"ID":"29cb450c-f082-4909-977f-840f5f050086","Type":"ContainerStarted","Data":"d9747d845255d6ec4595d83e2c6b82766e9c6c4c892819839159cb14c53d1425"} Jan 31 17:17:22 crc kubenswrapper[4769]: I0131 17:17:22.713706 4769 scope.go:117] "RemoveContainer" containerID="014b73c8ef938632d42113db88d83d9c9af346c48d23501738b4fe440f72b44f" Jan 31 17:17:22 crc kubenswrapper[4769]: I0131 17:17:22.713773 4769 scope.go:117] "RemoveContainer" containerID="6cf22fe5662840a8c5cb4e5929a4fb93dc807d17766ba53f099e7b3a63f63ef8" Jan 31 17:17:22 crc kubenswrapper[4769]: I0131 17:17:22.713858 4769 scope.go:117] "RemoveContainer" containerID="02962f2b97310c8a4c458064c36a004fdb8bf734f79a6cefa9903589110e2751" Jan 31 17:17:22 crc kubenswrapper[4769]: E0131 17:17:22.714168 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:17:22 crc kubenswrapper[4769]: I0131 17:17:22.714255 4769 scope.go:117] "RemoveContainer" containerID="c0adeff93092159437f56d3a16d5a487149aa20a20f6d0f1c321bb6c899e34c2" Jan 31 17:17:22 crc kubenswrapper[4769]: I0131 17:17:22.714396 4769 scope.go:117] "RemoveContainer" containerID="634222e06565f9a8960db793ad16c4794ee090ac4d43ace186f3daf20b881cd9" Jan 31 17:17:22 crc kubenswrapper[4769]: I0131 17:17:22.714459 4769 scope.go:117] "RemoveContainer" containerID="0dd9545c12ba7121dbfacc14fa99efe6c52da5daa3b0c89a71e5af63f8c99eda" Jan 31 17:17:22 crc kubenswrapper[4769]: I0131 17:17:22.714556 4769 scope.go:117] "RemoveContainer" containerID="9dbd9a0ad20346e4c64377f2a67c6289980b41414e80aaee45dc779649f3ac86" Jan 31 17:17:22 crc kubenswrapper[4769]: I0131 17:17:22.714654 4769 scope.go:117] "RemoveContainer" containerID="24a94dae6f7672de022836fe383e9a1df16a07423b4ffed3c01db43a4d952d98" Jan 31 17:17:22 crc kubenswrapper[4769]: I0131 17:17:22.714783 4769 scope.go:117] "RemoveContainer" containerID="b5a2131b280435817997df4440d40e8bfe82db2f050869abe88ef581ee288329" Jan 31 17:17:22 crc kubenswrapper[4769]: I0131 17:17:22.714821 4769 scope.go:117] "RemoveContainer" containerID="9e0bc3e16231f9b53fb4ea1d60f9528601c7abd064da24494465904562243b24" Jan 31 17:17:22 crc kubenswrapper[4769]: E0131 17:17:22.714873 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator 
pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:17:22 crc kubenswrapper[4769]: E0131 17:17:22.715072 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:17:25 crc kubenswrapper[4769]: I0131 17:17:25.480024 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-fk6lp/must-gather-9kcd6" event={"ID":"29cb450c-f082-4909-977f-840f5f050086","Type":"ContainerStarted","Data":"245c83ffdfca6976b09c03d8f2f74c1d869a30f50e8647bf9ada60e27c97a9c5"} Jan 31 17:17:26 crc kubenswrapper[4769]: I0131 17:17:26.490553 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-fk6lp/must-gather-9kcd6" event={"ID":"29cb450c-f082-4909-977f-840f5f050086","Type":"ContainerStarted","Data":"20b43e9e319f73a74f105a3b5b855dd70ebe8c8cbb3577e9bba9f0217833725a"} Jan 31 17:17:26 crc kubenswrapper[4769]: I0131 17:17:26.514395 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-fk6lp/must-gather-9kcd6" podStartSLOduration=2.808818536 podStartE2EDuration="6.514373148s" podCreationTimestamp="2026-01-31 17:17:20 +0000 UTC" firstStartedPulling="2026-01-31 17:17:21.447403986 +0000 UTC m=+2889.521572665" lastFinishedPulling="2026-01-31 17:17:25.152958608 +0000 UTC m=+2893.227127277" observedRunningTime="2026-01-31 17:17:26.506100747 +0000 UTC m=+2894.580269436" watchObservedRunningTime="2026-01-31 17:17:26.514373148 +0000 UTC m=+2894.588541817" Jan 31 17:17:28 crc kubenswrapper[4769]: I0131 17:17:28.708768 4769 scope.go:117] "RemoveContainer" containerID="3c8c682bf057b3f3cd818ea1598fbb8e267b962e0187e1fb9693415421d4042c" Jan 31 17:17:28 crc kubenswrapper[4769]: I0131 17:17:28.709069 4769 scope.go:117] "RemoveContainer" containerID="6c0530e866cc91260b9953bf631bfbceeff9767fb372acc5471afcaeb5dee739" Jan 31 17:17:28 crc kubenswrapper[4769]: E0131 17:17:28.709326 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting 
failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:17:32 crc kubenswrapper[4769]: I0131 17:17:32.784876 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices\") pod \"swift-ring-rebalance-2sjs2\" (UID: \"54c0116b-a027-4f11-8b6b-aa00778f1acb\") " pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 17:17:32 crc kubenswrapper[4769]: E0131 17:17:32.785109 4769 configmap.go:193] Couldn't get configMap swift-kuttl-tests/swift-ring-config-data: configmap "swift-ring-config-data" not found Jan 31 17:17:32 crc kubenswrapper[4769]: E0131 17:17:32.785163 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices podName:54c0116b-a027-4f11-8b6b-aa00778f1acb nodeName:}" failed. No retries permitted until 2026-01-31 17:19:34.785145851 +0000 UTC m=+3022.859314540 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices") pod "swift-ring-rebalance-2sjs2" (UID: "54c0116b-a027-4f11-8b6b-aa00778f1acb") : configmap "swift-ring-config-data" not found Jan 31 17:17:35 crc kubenswrapper[4769]: I0131 17:17:35.708870 4769 scope.go:117] "RemoveContainer" containerID="c0adeff93092159437f56d3a16d5a487149aa20a20f6d0f1c321bb6c899e34c2" Jan 31 17:17:35 crc kubenswrapper[4769]: I0131 17:17:35.710751 4769 scope.go:117] "RemoveContainer" containerID="9dbd9a0ad20346e4c64377f2a67c6289980b41414e80aaee45dc779649f3ac86" Jan 31 17:17:35 crc kubenswrapper[4769]: I0131 17:17:35.711097 4769 scope.go:117] "RemoveContainer" containerID="b5a2131b280435817997df4440d40e8bfe82db2f050869abe88ef581ee288329" Jan 31 17:17:35 crc kubenswrapper[4769]: I0131 17:17:35.711296 4769 scope.go:117] "RemoveContainer" containerID="9e0bc3e16231f9b53fb4ea1d60f9528601c7abd064da24494465904562243b24" Jan 31 17:17:35 crc kubenswrapper[4769]: E0131 17:17:35.712050 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:17:36 crc kubenswrapper[4769]: 
I0131 17:17:36.708904 4769 scope.go:117] "RemoveContainer" containerID="014b73c8ef938632d42113db88d83d9c9af346c48d23501738b4fe440f72b44f" Jan 31 17:17:36 crc kubenswrapper[4769]: I0131 17:17:36.708974 4769 scope.go:117] "RemoveContainer" containerID="6cf22fe5662840a8c5cb4e5929a4fb93dc807d17766ba53f099e7b3a63f63ef8" Jan 31 17:17:36 crc kubenswrapper[4769]: I0131 17:17:36.709058 4769 scope.go:117] "RemoveContainer" containerID="02962f2b97310c8a4c458064c36a004fdb8bf734f79a6cefa9903589110e2751" Jan 31 17:17:36 crc kubenswrapper[4769]: E0131 17:17:36.709329 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:17:36 crc kubenswrapper[4769]: I0131 17:17:36.710872 4769 scope.go:117] "RemoveContainer" containerID="634222e06565f9a8960db793ad16c4794ee090ac4d43ace186f3daf20b881cd9" Jan 31 17:17:36 crc kubenswrapper[4769]: I0131 17:17:36.711174 4769 scope.go:117] "RemoveContainer" containerID="0dd9545c12ba7121dbfacc14fa99efe6c52da5daa3b0c89a71e5af63f8c99eda" Jan 31 17:17:36 crc kubenswrapper[4769]: I0131 17:17:36.711479 4769 scope.go:117] "RemoveContainer" containerID="24a94dae6f7672de022836fe383e9a1df16a07423b4ffed3c01db43a4d952d98" Jan 31 17:17:36 crc kubenswrapper[4769]: E0131 17:17:36.712108 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:17:37 crc kubenswrapper[4769]: I0131 17:17:37.572285 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="85f1f039507b8b306801c6bb4ff81c81e24ef3782c3aa1af52cb2accc8cbf579" exitCode=1 Jan 31 17:17:37 crc kubenswrapper[4769]: I0131 17:17:37.572375 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"85f1f039507b8b306801c6bb4ff81c81e24ef3782c3aa1af52cb2accc8cbf579"} Jan 31 17:17:37 crc kubenswrapper[4769]: I0131 17:17:37.572901 4769 scope.go:117] "RemoveContainer" containerID="e02510ae3cb1cffb2ecbbc81af17dca00f092b778dff22277e474bd53c8f2cc7" Jan 31 17:17:37 crc 
kubenswrapper[4769]: I0131 17:17:37.573703 4769 scope.go:117] "RemoveContainer" containerID="c0adeff93092159437f56d3a16d5a487149aa20a20f6d0f1c321bb6c899e34c2" Jan 31 17:17:37 crc kubenswrapper[4769]: I0131 17:17:37.573779 4769 scope.go:117] "RemoveContainer" containerID="9dbd9a0ad20346e4c64377f2a67c6289980b41414e80aaee45dc779649f3ac86" Jan 31 17:17:37 crc kubenswrapper[4769]: I0131 17:17:37.573808 4769 scope.go:117] "RemoveContainer" containerID="85f1f039507b8b306801c6bb4ff81c81e24ef3782c3aa1af52cb2accc8cbf579" Jan 31 17:17:37 crc kubenswrapper[4769]: I0131 17:17:37.573901 4769 scope.go:117] "RemoveContainer" containerID="b5a2131b280435817997df4440d40e8bfe82db2f050869abe88ef581ee288329" Jan 31 17:17:37 crc kubenswrapper[4769]: I0131 17:17:37.573948 4769 scope.go:117] "RemoveContainer" containerID="9e0bc3e16231f9b53fb4ea1d60f9528601c7abd064da24494465904562243b24" Jan 31 17:17:37 crc kubenswrapper[4769]: E0131 17:17:37.574281 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:17:42 crc kubenswrapper[4769]: I0131 17:17:42.712765 4769 scope.go:117] "RemoveContainer" containerID="3c8c682bf057b3f3cd818ea1598fbb8e267b962e0187e1fb9693415421d4042c" Jan 31 17:17:42 crc kubenswrapper[4769]: I0131 17:17:42.713326 4769 scope.go:117] "RemoveContainer" containerID="6c0530e866cc91260b9953bf631bfbceeff9767fb372acc5471afcaeb5dee739" Jan 31 17:17:42 crc kubenswrapper[4769]: E0131 17:17:42.713626 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:17:45 crc kubenswrapper[4769]: E0131 17:17:45.450354 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ring-data-devices], unattached volumes=[], failed to process volumes=[]: context deadline exceeded" pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" podUID="54c0116b-a027-4f11-8b6b-aa00778f1acb" Jan 
31 17:17:45 crc kubenswrapper[4769]: I0131 17:17:45.638774 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 17:17:47 crc kubenswrapper[4769]: I0131 17:17:47.710083 4769 scope.go:117] "RemoveContainer" containerID="634222e06565f9a8960db793ad16c4794ee090ac4d43ace186f3daf20b881cd9" Jan 31 17:17:47 crc kubenswrapper[4769]: I0131 17:17:47.710798 4769 scope.go:117] "RemoveContainer" containerID="0dd9545c12ba7121dbfacc14fa99efe6c52da5daa3b0c89a71e5af63f8c99eda" Jan 31 17:17:47 crc kubenswrapper[4769]: I0131 17:17:47.711099 4769 scope.go:117] "RemoveContainer" containerID="24a94dae6f7672de022836fe383e9a1df16a07423b4ffed3c01db43a4d952d98" Jan 31 17:17:47 crc kubenswrapper[4769]: E0131 17:17:47.711905 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:17:49 crc kubenswrapper[4769]: I0131 17:17:49.708788 4769 scope.go:117] "RemoveContainer" containerID="c0adeff93092159437f56d3a16d5a487149aa20a20f6d0f1c321bb6c899e34c2" Jan 31 17:17:49 crc kubenswrapper[4769]: I0131 17:17:49.708879 4769 scope.go:117] "RemoveContainer" containerID="9dbd9a0ad20346e4c64377f2a67c6289980b41414e80aaee45dc779649f3ac86" Jan 31 17:17:49 crc kubenswrapper[4769]: I0131 17:17:49.708909 4769 scope.go:117] "RemoveContainer" containerID="85f1f039507b8b306801c6bb4ff81c81e24ef3782c3aa1af52cb2accc8cbf579" Jan 31 17:17:49 crc kubenswrapper[4769]: I0131 17:17:49.708994 4769 scope.go:117] "RemoveContainer" containerID="b5a2131b280435817997df4440d40e8bfe82db2f050869abe88ef581ee288329" Jan 31 17:17:49 crc kubenswrapper[4769]: I0131 17:17:49.709038 4769 scope.go:117] "RemoveContainer" containerID="9e0bc3e16231f9b53fb4ea1d60f9528601c7abd064da24494465904562243b24" Jan 31 17:17:49 crc kubenswrapper[4769]: E0131 17:17:49.709410 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:17:51 crc kubenswrapper[4769]: I0131 17:17:51.709177 4769 scope.go:117] "RemoveContainer" containerID="014b73c8ef938632d42113db88d83d9c9af346c48d23501738b4fe440f72b44f" Jan 31 17:17:51 crc kubenswrapper[4769]: I0131 17:17:51.709553 4769 scope.go:117] "RemoveContainer" containerID="6cf22fe5662840a8c5cb4e5929a4fb93dc807d17766ba53f099e7b3a63f63ef8" Jan 31 17:17:51 crc kubenswrapper[4769]: I0131 17:17:51.709639 4769 scope.go:117] "RemoveContainer" containerID="02962f2b97310c8a4c458064c36a004fdb8bf734f79a6cefa9903589110e2751" Jan 31 17:17:51 crc kubenswrapper[4769]: E0131 17:17:51.709872 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:17:53 crc kubenswrapper[4769]: I0131 17:17:53.708103 4769 scope.go:117] "RemoveContainer" containerID="3c8c682bf057b3f3cd818ea1598fbb8e267b962e0187e1fb9693415421d4042c" Jan 31 17:17:53 crc kubenswrapper[4769]: I0131 17:17:53.708427 4769 scope.go:117] "RemoveContainer" containerID="6c0530e866cc91260b9953bf631bfbceeff9767fb372acc5471afcaeb5dee739" Jan 31 17:17:53 crc kubenswrapper[4769]: E0131 17:17:53.708652 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:18:00 crc kubenswrapper[4769]: I0131 17:18:00.708070 4769 scope.go:117] "RemoveContainer" containerID="634222e06565f9a8960db793ad16c4794ee090ac4d43ace186f3daf20b881cd9" Jan 31 17:18:00 crc kubenswrapper[4769]: I0131 17:18:00.708583 4769 scope.go:117] "RemoveContainer" containerID="0dd9545c12ba7121dbfacc14fa99efe6c52da5daa3b0c89a71e5af63f8c99eda" Jan 31 17:18:00 crc kubenswrapper[4769]: I0131 17:18:00.708667 4769 scope.go:117] "RemoveContainer" containerID="24a94dae6f7672de022836fe383e9a1df16a07423b4ffed3c01db43a4d952d98" Jan 31 17:18:01 crc kubenswrapper[4769]: I0131 17:18:01.708512 4769 scope.go:117] "RemoveContainer" containerID="c0adeff93092159437f56d3a16d5a487149aa20a20f6d0f1c321bb6c899e34c2" Jan 31 17:18:01 crc kubenswrapper[4769]: I0131 17:18:01.708836 4769 scope.go:117] "RemoveContainer" 
containerID="9dbd9a0ad20346e4c64377f2a67c6289980b41414e80aaee45dc779649f3ac86" Jan 31 17:18:01 crc kubenswrapper[4769]: I0131 17:18:01.708859 4769 scope.go:117] "RemoveContainer" containerID="85f1f039507b8b306801c6bb4ff81c81e24ef3782c3aa1af52cb2accc8cbf579" Jan 31 17:18:01 crc kubenswrapper[4769]: I0131 17:18:01.708914 4769 scope.go:117] "RemoveContainer" containerID="b5a2131b280435817997df4440d40e8bfe82db2f050869abe88ef581ee288329" Jan 31 17:18:01 crc kubenswrapper[4769]: I0131 17:18:01.708950 4769 scope.go:117] "RemoveContainer" containerID="9e0bc3e16231f9b53fb4ea1d60f9528601c7abd064da24494465904562243b24" Jan 31 17:18:01 crc kubenswrapper[4769]: E0131 17:18:01.709287 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:18:01 crc kubenswrapper[4769]: I0131 17:18:01.800206 4769 generic.go:334] "Generic (PLEG): container finished" podID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" containerID="1d248f00feafbc9afe8eb2636aa7b6e4ee44e68fbe01f11d313641cc945e74d3" exitCode=1 Jan 31 17:18:01 crc kubenswrapper[4769]: I0131 17:18:01.800236 4769 generic.go:334] "Generic (PLEG): container finished" podID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" containerID="4a808d9f2da7be4edf651d6d424731ea4ceaeed364b4417a065b21c22246d30c" exitCode=1 Jan 31 17:18:01 crc kubenswrapper[4769]: I0131 17:18:01.800243 4769 generic.go:334] "Generic (PLEG): container finished" podID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" containerID="93b9536f213a9a8ebff163e44fa064fea71fe72fd3236438bcb9d2c1082f94fd" exitCode=1 Jan 31 17:18:01 crc kubenswrapper[4769]: I0131 17:18:01.800262 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerDied","Data":"1d248f00feafbc9afe8eb2636aa7b6e4ee44e68fbe01f11d313641cc945e74d3"} Jan 31 17:18:01 crc kubenswrapper[4769]: I0131 17:18:01.800288 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerDied","Data":"4a808d9f2da7be4edf651d6d424731ea4ceaeed364b4417a065b21c22246d30c"} Jan 31 17:18:01 crc kubenswrapper[4769]: I0131 17:18:01.800299 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" 
event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerDied","Data":"93b9536f213a9a8ebff163e44fa064fea71fe72fd3236438bcb9d2c1082f94fd"} Jan 31 17:18:01 crc kubenswrapper[4769]: I0131 17:18:01.800315 4769 scope.go:117] "RemoveContainer" containerID="24a94dae6f7672de022836fe383e9a1df16a07423b4ffed3c01db43a4d952d98" Jan 31 17:18:01 crc kubenswrapper[4769]: I0131 17:18:01.800987 4769 scope.go:117] "RemoveContainer" containerID="93b9536f213a9a8ebff163e44fa064fea71fe72fd3236438bcb9d2c1082f94fd" Jan 31 17:18:01 crc kubenswrapper[4769]: I0131 17:18:01.801051 4769 scope.go:117] "RemoveContainer" containerID="4a808d9f2da7be4edf651d6d424731ea4ceaeed364b4417a065b21c22246d30c" Jan 31 17:18:01 crc kubenswrapper[4769]: I0131 17:18:01.801163 4769 scope.go:117] "RemoveContainer" containerID="1d248f00feafbc9afe8eb2636aa7b6e4ee44e68fbe01f11d313641cc945e74d3" Jan 31 17:18:01 crc kubenswrapper[4769]: E0131 17:18:01.801406 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:18:01 crc kubenswrapper[4769]: I0131 17:18:01.848248 4769 scope.go:117] "RemoveContainer" containerID="0dd9545c12ba7121dbfacc14fa99efe6c52da5daa3b0c89a71e5af63f8c99eda" Jan 31 17:18:01 crc kubenswrapper[4769]: I0131 17:18:01.893223 4769 scope.go:117] "RemoveContainer" containerID="634222e06565f9a8960db793ad16c4794ee090ac4d43ace186f3daf20b881cd9" Jan 31 17:18:02 crc kubenswrapper[4769]: I0131 17:18:02.730953 4769 scope.go:117] "RemoveContainer" containerID="014b73c8ef938632d42113db88d83d9c9af346c48d23501738b4fe440f72b44f" Jan 31 17:18:02 crc kubenswrapper[4769]: I0131 17:18:02.731315 4769 scope.go:117] "RemoveContainer" containerID="6cf22fe5662840a8c5cb4e5929a4fb93dc807d17766ba53f099e7b3a63f63ef8" Jan 31 17:18:02 crc kubenswrapper[4769]: I0131 17:18:02.731432 4769 scope.go:117] "RemoveContainer" containerID="02962f2b97310c8a4c458064c36a004fdb8bf734f79a6cefa9903589110e2751" Jan 31 17:18:03 crc kubenswrapper[4769]: I0131 17:18:03.844630 4769 generic.go:334] "Generic (PLEG): container finished" podID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" containerID="d4d0768906439206ffe8cc39e0b86534e43578159e313ff70f760a5885b2534a" exitCode=1 Jan 31 17:18:03 crc kubenswrapper[4769]: I0131 17:18:03.844950 4769 generic.go:334] "Generic (PLEG): container finished" podID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" containerID="bc16d6c35d321e7f226f326ede73d4f7a8c9f8e22dfc09695519261eeb85d0b3" exitCode=1 Jan 31 17:18:03 crc kubenswrapper[4769]: I0131 17:18:03.844959 4769 generic.go:334] "Generic (PLEG): container finished" podID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" containerID="c6269a4105bb6ea59db61469209797c1723266af84a047a8fd15c8ff6b20fe7c" exitCode=1 Jan 31 17:18:03 crc kubenswrapper[4769]: I0131 17:18:03.844723 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerDied","Data":"d4d0768906439206ffe8cc39e0b86534e43578159e313ff70f760a5885b2534a"} Jan 31 17:18:03 crc kubenswrapper[4769]: I0131 17:18:03.844994 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerDied","Data":"bc16d6c35d321e7f226f326ede73d4f7a8c9f8e22dfc09695519261eeb85d0b3"} Jan 31 17:18:03 crc kubenswrapper[4769]: I0131 17:18:03.845011 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerDied","Data":"c6269a4105bb6ea59db61469209797c1723266af84a047a8fd15c8ff6b20fe7c"} Jan 31 17:18:03 crc kubenswrapper[4769]: I0131 17:18:03.845028 4769 scope.go:117] "RemoveContainer" containerID="02962f2b97310c8a4c458064c36a004fdb8bf734f79a6cefa9903589110e2751" Jan 31 17:18:03 crc kubenswrapper[4769]: I0131 17:18:03.846062 4769 scope.go:117] "RemoveContainer" containerID="c6269a4105bb6ea59db61469209797c1723266af84a047a8fd15c8ff6b20fe7c" Jan 31 17:18:03 crc kubenswrapper[4769]: I0131 17:18:03.846175 4769 scope.go:117] "RemoveContainer" containerID="bc16d6c35d321e7f226f326ede73d4f7a8c9f8e22dfc09695519261eeb85d0b3" Jan 31 17:18:03 crc kubenswrapper[4769]: I0131 17:18:03.846345 4769 scope.go:117] "RemoveContainer" containerID="d4d0768906439206ffe8cc39e0b86534e43578159e313ff70f760a5885b2534a" Jan 31 17:18:03 crc kubenswrapper[4769]: E0131 17:18:03.846775 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:18:03 crc kubenswrapper[4769]: I0131 17:18:03.919325 4769 scope.go:117] "RemoveContainer" containerID="6cf22fe5662840a8c5cb4e5929a4fb93dc807d17766ba53f099e7b3a63f63ef8" Jan 31 17:18:03 crc kubenswrapper[4769]: I0131 17:18:03.966944 4769 scope.go:117] "RemoveContainer" containerID="014b73c8ef938632d42113db88d83d9c9af346c48d23501738b4fe440f72b44f" Jan 31 17:18:06 crc kubenswrapper[4769]: I0131 17:18:06.400208 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_55c7a49163ba348c10e2be21119f4ca8799dffa34873699cfe8f8b6d7bnwqd7_e0b17231-5c20-443c-8fc8-6099f8d88e96/util/0.log" Jan 31 17:18:06 crc kubenswrapper[4769]: I0131 17:18:06.538623 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_55c7a49163ba348c10e2be21119f4ca8799dffa34873699cfe8f8b6d7bnwqd7_e0b17231-5c20-443c-8fc8-6099f8d88e96/util/0.log" Jan 31 17:18:06 crc kubenswrapper[4769]: I0131 17:18:06.562295 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_55c7a49163ba348c10e2be21119f4ca8799dffa34873699cfe8f8b6d7bnwqd7_e0b17231-5c20-443c-8fc8-6099f8d88e96/pull/0.log" Jan 31 17:18:06 crc 
kubenswrapper[4769]: I0131 17:18:06.604065 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_55c7a49163ba348c10e2be21119f4ca8799dffa34873699cfe8f8b6d7bnwqd7_e0b17231-5c20-443c-8fc8-6099f8d88e96/pull/0.log" Jan 31 17:18:06 crc kubenswrapper[4769]: I0131 17:18:06.791460 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_55c7a49163ba348c10e2be21119f4ca8799dffa34873699cfe8f8b6d7bnwqd7_e0b17231-5c20-443c-8fc8-6099f8d88e96/util/0.log" Jan 31 17:18:06 crc kubenswrapper[4769]: I0131 17:18:06.798400 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_55c7a49163ba348c10e2be21119f4ca8799dffa34873699cfe8f8b6d7bnwqd7_e0b17231-5c20-443c-8fc8-6099f8d88e96/pull/0.log" Jan 31 17:18:06 crc kubenswrapper[4769]: I0131 17:18:06.799955 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_55c7a49163ba348c10e2be21119f4ca8799dffa34873699cfe8f8b6d7bnwqd7_e0b17231-5c20-443c-8fc8-6099f8d88e96/extract/0.log" Jan 31 17:18:06 crc kubenswrapper[4769]: I0131 17:18:06.955869 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_8b16d134801811491f87cb7f2092ac95279608e09f110b7bb4e1bb9281p2hdz_2fde53db-e7ae-4954-a4e8-2c3c28312031/util/0.log" Jan 31 17:18:07 crc kubenswrapper[4769]: I0131 17:18:07.096534 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_8b16d134801811491f87cb7f2092ac95279608e09f110b7bb4e1bb9281p2hdz_2fde53db-e7ae-4954-a4e8-2c3c28312031/util/0.log" Jan 31 17:18:07 crc kubenswrapper[4769]: I0131 17:18:07.105738 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_8b16d134801811491f87cb7f2092ac95279608e09f110b7bb4e1bb9281p2hdz_2fde53db-e7ae-4954-a4e8-2c3c28312031/pull/0.log" Jan 31 17:18:07 crc kubenswrapper[4769]: I0131 17:18:07.131905 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_8b16d134801811491f87cb7f2092ac95279608e09f110b7bb4e1bb9281p2hdz_2fde53db-e7ae-4954-a4e8-2c3c28312031/pull/0.log" Jan 31 17:18:07 crc kubenswrapper[4769]: I0131 17:18:07.282906 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_8b16d134801811491f87cb7f2092ac95279608e09f110b7bb4e1bb9281p2hdz_2fde53db-e7ae-4954-a4e8-2c3c28312031/util/0.log" Jan 31 17:18:07 crc kubenswrapper[4769]: I0131 17:18:07.300814 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_8b16d134801811491f87cb7f2092ac95279608e09f110b7bb4e1bb9281p2hdz_2fde53db-e7ae-4954-a4e8-2c3c28312031/pull/0.log" Jan 31 17:18:07 crc kubenswrapper[4769]: I0131 17:18:07.353279 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_8b16d134801811491f87cb7f2092ac95279608e09f110b7bb4e1bb9281p2hdz_2fde53db-e7ae-4954-a4e8-2c3c28312031/extract/0.log" Jan 31 17:18:07 crc kubenswrapper[4769]: I0131 17:18:07.446671 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5905gjn9_ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7/util/0.log" Jan 31 17:18:07 crc kubenswrapper[4769]: I0131 17:18:07.649280 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5905gjn9_ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7/util/0.log" Jan 31 17:18:07 crc kubenswrapper[4769]: I0131 17:18:07.685260 4769 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5905gjn9_ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7/pull/0.log" Jan 31 17:18:07 crc kubenswrapper[4769]: I0131 17:18:07.687265 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5905gjn9_ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7/pull/0.log" Jan 31 17:18:07 crc kubenswrapper[4769]: I0131 17:18:07.865967 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5905gjn9_ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7/extract/0.log" Jan 31 17:18:07 crc kubenswrapper[4769]: I0131 17:18:07.886386 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5905gjn9_ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7/util/0.log" Jan 31 17:18:07 crc kubenswrapper[4769]: I0131 17:18:07.896971 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5905gjn9_ff9f841c-5bf7-423f-b9ad-4f69f91d3ad7/pull/0.log" Jan 31 17:18:08 crc kubenswrapper[4769]: I0131 17:18:08.051836 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef4fg77_607e4223-7a52-479b-9347-1c5e55698bcc/util/0.log" Jan 31 17:18:08 crc kubenswrapper[4769]: I0131 17:18:08.266815 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef4fg77_607e4223-7a52-479b-9347-1c5e55698bcc/pull/0.log" Jan 31 17:18:08 crc kubenswrapper[4769]: I0131 17:18:08.281955 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef4fg77_607e4223-7a52-479b-9347-1c5e55698bcc/pull/0.log" Jan 31 17:18:08 crc kubenswrapper[4769]: I0131 17:18:08.300182 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef4fg77_607e4223-7a52-479b-9347-1c5e55698bcc/util/0.log" Jan 31 17:18:08 crc kubenswrapper[4769]: I0131 17:18:08.474055 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef4fg77_607e4223-7a52-479b-9347-1c5e55698bcc/util/0.log" Jan 31 17:18:08 crc kubenswrapper[4769]: I0131 17:18:08.474755 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef4fg77_607e4223-7a52-479b-9347-1c5e55698bcc/pull/0.log" Jan 31 17:18:08 crc kubenswrapper[4769]: I0131 17:18:08.476309 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b43f19b8e3bb8997a527070b172ae030accff9cd1a2f2b076f58d9c4ef4fg77_607e4223-7a52-479b-9347-1c5e55698bcc/extract/0.log" Jan 31 17:18:08 crc kubenswrapper[4769]: I0131 17:18:08.708968 4769 scope.go:117] "RemoveContainer" containerID="3c8c682bf057b3f3cd818ea1598fbb8e267b962e0187e1fb9693415421d4042c" Jan 31 17:18:08 crc kubenswrapper[4769]: I0131 17:18:08.708991 4769 scope.go:117] "RemoveContainer" containerID="6c0530e866cc91260b9953bf631bfbceeff9767fb372acc5471afcaeb5dee739" Jan 31 17:18:08 crc kubenswrapper[4769]: E0131 17:18:08.709189 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" 
with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:18:08 crc kubenswrapper[4769]: I0131 17:18:08.721359 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-index-z227n_85c08709-5d57-4967-a160-3a773cffe1c1/registry-server/0.log" Jan 31 17:18:08 crc kubenswrapper[4769]: I0131 17:18:08.817284 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576gfxdv_58664432-2fc3-423c-b54a-14d34b96318c/util/0.log" Jan 31 17:18:09 crc kubenswrapper[4769]: I0131 17:18:09.083695 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576gfxdv_58664432-2fc3-423c-b54a-14d34b96318c/util/0.log" Jan 31 17:18:09 crc kubenswrapper[4769]: I0131 17:18:09.097199 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576gfxdv_58664432-2fc3-423c-b54a-14d34b96318c/pull/0.log" Jan 31 17:18:09 crc kubenswrapper[4769]: I0131 17:18:09.121874 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576gfxdv_58664432-2fc3-423c-b54a-14d34b96318c/pull/0.log" Jan 31 17:18:09 crc kubenswrapper[4769]: I0131 17:18:09.307100 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576gfxdv_58664432-2fc3-423c-b54a-14d34b96318c/util/0.log" Jan 31 17:18:09 crc kubenswrapper[4769]: I0131 17:18:09.361334 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576gfxdv_58664432-2fc3-423c-b54a-14d34b96318c/pull/0.log" Jan 31 17:18:09 crc kubenswrapper[4769]: I0131 17:18:09.420944 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_d7c3b59ed6c2e571e21460d743e5fcd0c5f76cb7c446e474a3d05f7576gfxdv_58664432-2fc3-423c-b54a-14d34b96318c/extract/0.log" Jan 31 17:18:09 crc kubenswrapper[4769]: I0131 17:18:09.524028 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40pcmg8_0634e0a0-8b72-4218-9076-dc8cfdc6c3e5/util/0.log" Jan 31 17:18:09 crc kubenswrapper[4769]: I0131 17:18:09.701950 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40pcmg8_0634e0a0-8b72-4218-9076-dc8cfdc6c3e5/pull/0.log" Jan 31 17:18:09 crc kubenswrapper[4769]: I0131 17:18:09.706422 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40pcmg8_0634e0a0-8b72-4218-9076-dc8cfdc6c3e5/util/0.log" Jan 31 17:18:09 crc kubenswrapper[4769]: I0131 17:18:09.714240 4769 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40pcmg8_0634e0a0-8b72-4218-9076-dc8cfdc6c3e5/pull/0.log" Jan 31 17:18:09 crc kubenswrapper[4769]: I0131 17:18:09.917341 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40pcmg8_0634e0a0-8b72-4218-9076-dc8cfdc6c3e5/pull/0.log" Jan 31 17:18:09 crc kubenswrapper[4769]: I0131 17:18:09.920699 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40pcmg8_0634e0a0-8b72-4218-9076-dc8cfdc6c3e5/extract/0.log" Jan 31 17:18:09 crc kubenswrapper[4769]: I0131 17:18:09.944023 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_f5f7435db1a968bc2e4b919cf4f5a8f6719d9ac995e6b095f5b2e84f40pcmg8_0634e0a0-8b72-4218-9076-dc8cfdc6c3e5/util/0.log" Jan 31 17:18:10 crc kubenswrapper[4769]: I0131 17:18:10.121789 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-6c68d645db-8rh4f_3758c511-4d8c-4ebd-a8f6-ae939aa41381/manager/0.log" Jan 31 17:18:10 crc kubenswrapper[4769]: I0131 17:18:10.154039 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-index-t2kpn_c9ad763b-ad6a-4356-8824-0551ae2544f4/registry-server/0.log" Jan 31 17:18:10 crc kubenswrapper[4769]: I0131 17:18:10.408552 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-9fd969457-jfqwn_1dcc8ec7-f8b6-4f25-84cb-e90affd7ec92/manager/0.log" Jan 31 17:18:10 crc kubenswrapper[4769]: I0131 17:18:10.476779 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-index-pp4vk_a2a60854-2368-418f-904d-108236556cfd/registry-server/0.log" Jan 31 17:18:10 crc kubenswrapper[4769]: I0131 17:18:10.544159 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-cb6c676df-j2w59_374acfa1-5eda-48fd-964f-7fc81aaab552/manager/0.log" Jan 31 17:18:10 crc kubenswrapper[4769]: I0131 17:18:10.744347 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-index-594rf_d0deba60-bef5-4108-ab00-ab378d8c9a3b/registry-server/0.log" Jan 31 17:18:10 crc kubenswrapper[4769]: I0131 17:18:10.779818 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-779fc9694b-kf6dn_729bbda5-c237-4a8e-abc9-a80b755e1cd1/operator/0.log" Jan 31 17:18:10 crc kubenswrapper[4769]: I0131 17:18:10.871706 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-index-bk6tw_48020ab1-7b3f-4269-9ebd-8f275417b7cb/registry-server/0.log" Jan 31 17:18:11 crc kubenswrapper[4769]: I0131 17:18:11.008014 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-7c45849b49-zv5rj_6373c7e8-28b1-4687-b2f0-baab31a0ae5b/manager/0.log" Jan 31 17:18:11 crc kubenswrapper[4769]: I0131 17:18:11.079260 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-index-8j8j5_496e46ab-9926-47d6-9c4e-02b40dee7ffa/registry-server/0.log" Jan 31 17:18:11 crc kubenswrapper[4769]: I0131 17:18:11.362204 4769 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-5bfd447ff5-zwcfv_a992acae-b11b-404d-b2c1-9b26f97725e8/manager/0.log" Jan 31 17:18:13 crc kubenswrapper[4769]: I0131 17:18:13.709379 4769 scope.go:117] "RemoveContainer" containerID="93b9536f213a9a8ebff163e44fa064fea71fe72fd3236438bcb9d2c1082f94fd" Jan 31 17:18:13 crc kubenswrapper[4769]: I0131 17:18:13.710128 4769 scope.go:117] "RemoveContainer" containerID="4a808d9f2da7be4edf651d6d424731ea4ceaeed364b4417a065b21c22246d30c" Jan 31 17:18:13 crc kubenswrapper[4769]: I0131 17:18:13.710306 4769 scope.go:117] "RemoveContainer" containerID="1d248f00feafbc9afe8eb2636aa7b6e4ee44e68fbe01f11d313641cc945e74d3" Jan 31 17:18:13 crc kubenswrapper[4769]: E0131 17:18:13.710960 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:18:14 crc kubenswrapper[4769]: I0131 17:18:14.711812 4769 scope.go:117] "RemoveContainer" containerID="c0adeff93092159437f56d3a16d5a487149aa20a20f6d0f1c321bb6c899e34c2" Jan 31 17:18:14 crc kubenswrapper[4769]: I0131 17:18:14.711876 4769 scope.go:117] "RemoveContainer" containerID="9dbd9a0ad20346e4c64377f2a67c6289980b41414e80aaee45dc779649f3ac86" Jan 31 17:18:14 crc kubenswrapper[4769]: I0131 17:18:14.711898 4769 scope.go:117] "RemoveContainer" containerID="85f1f039507b8b306801c6bb4ff81c81e24ef3782c3aa1af52cb2accc8cbf579" Jan 31 17:18:14 crc kubenswrapper[4769]: I0131 17:18:14.711954 4769 scope.go:117] "RemoveContainer" containerID="b5a2131b280435817997df4440d40e8bfe82db2f050869abe88ef581ee288329" Jan 31 17:18:14 crc kubenswrapper[4769]: I0131 17:18:14.711987 4769 scope.go:117] "RemoveContainer" containerID="9e0bc3e16231f9b53fb4ea1d60f9528601c7abd064da24494465904562243b24" Jan 31 17:18:14 crc kubenswrapper[4769]: E0131 17:18:14.712205 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s 
restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:18:18 crc kubenswrapper[4769]: I0131 17:18:18.708295 4769 scope.go:117] "RemoveContainer" containerID="c6269a4105bb6ea59db61469209797c1723266af84a047a8fd15c8ff6b20fe7c" Jan 31 17:18:18 crc kubenswrapper[4769]: I0131 17:18:18.708726 4769 scope.go:117] "RemoveContainer" containerID="bc16d6c35d321e7f226f326ede73d4f7a8c9f8e22dfc09695519261eeb85d0b3" Jan 31 17:18:18 crc kubenswrapper[4769]: I0131 17:18:18.708851 4769 scope.go:117] "RemoveContainer" containerID="d4d0768906439206ffe8cc39e0b86534e43578159e313ff70f760a5885b2534a" Jan 31 17:18:18 crc kubenswrapper[4769]: E0131 17:18:18.709291 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:18:20 crc kubenswrapper[4769]: I0131 17:18:20.682047 4769 patch_prober.go:28] interesting pod/machine-config-daemon-4bqbm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 31 17:18:20 crc kubenswrapper[4769]: I0131 17:18:20.682114 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 31 17:18:23 crc kubenswrapper[4769]: I0131 17:18:23.708723 4769 scope.go:117] "RemoveContainer" containerID="3c8c682bf057b3f3cd818ea1598fbb8e267b962e0187e1fb9693415421d4042c" Jan 31 17:18:23 crc kubenswrapper[4769]: I0131 17:18:23.709071 4769 scope.go:117] "RemoveContainer" containerID="6c0530e866cc91260b9953bf631bfbceeff9767fb372acc5471afcaeb5dee739" Jan 31 17:18:23 crc kubenswrapper[4769]: E0131 17:18:23.887125 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:18:23 crc kubenswrapper[4769]: I0131 17:18:23.989577 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerStarted","Data":"20329b69b54456b9125eb1ef9a1aeb204bed6414354d722188fd5d824c019f40"} Jan 31 17:18:23 crc kubenswrapper[4769]: I0131 17:18:23.990066 
4769 scope.go:117] "RemoveContainer" containerID="3c8c682bf057b3f3cd818ea1598fbb8e267b962e0187e1fb9693415421d4042c" Jan 31 17:18:23 crc kubenswrapper[4769]: E0131 17:18:23.990366 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:18:23 crc kubenswrapper[4769]: I0131 17:18:23.990558 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 17:18:25 crc kubenswrapper[4769]: I0131 17:18:25.001385 4769 generic.go:334] "Generic (PLEG): container finished" podID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerID="20329b69b54456b9125eb1ef9a1aeb204bed6414354d722188fd5d824c019f40" exitCode=1 Jan 31 17:18:25 crc kubenswrapper[4769]: I0131 17:18:25.001790 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerDied","Data":"20329b69b54456b9125eb1ef9a1aeb204bed6414354d722188fd5d824c019f40"} Jan 31 17:18:25 crc kubenswrapper[4769]: I0131 17:18:25.002062 4769 scope.go:117] "RemoveContainer" containerID="3c8c682bf057b3f3cd818ea1598fbb8e267b962e0187e1fb9693415421d4042c" Jan 31 17:18:25 crc kubenswrapper[4769]: I0131 17:18:25.002452 4769 scope.go:117] "RemoveContainer" containerID="20329b69b54456b9125eb1ef9a1aeb204bed6414354d722188fd5d824c019f40" Jan 31 17:18:25 crc kubenswrapper[4769]: I0131 17:18:25.002653 4769 scope.go:117] "RemoveContainer" containerID="6c0530e866cc91260b9953bf631bfbceeff9767fb372acc5471afcaeb5dee739" Jan 31 17:18:25 crc kubenswrapper[4769]: E0131 17:18:25.003329 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:18:25 crc kubenswrapper[4769]: I0131 17:18:25.708568 4769 scope.go:117] "RemoveContainer" containerID="c0adeff93092159437f56d3a16d5a487149aa20a20f6d0f1c321bb6c899e34c2" Jan 31 17:18:25 crc kubenswrapper[4769]: I0131 17:18:25.708934 4769 scope.go:117] "RemoveContainer" containerID="9dbd9a0ad20346e4c64377f2a67c6289980b41414e80aaee45dc779649f3ac86" Jan 31 17:18:25 crc kubenswrapper[4769]: I0131 17:18:25.708962 4769 scope.go:117] "RemoveContainer" containerID="85f1f039507b8b306801c6bb4ff81c81e24ef3782c3aa1af52cb2accc8cbf579" Jan 31 17:18:25 crc kubenswrapper[4769]: I0131 17:18:25.709044 4769 scope.go:117] "RemoveContainer" containerID="b5a2131b280435817997df4440d40e8bfe82db2f050869abe88ef581ee288329" Jan 31 17:18:25 crc kubenswrapper[4769]: I0131 17:18:25.709086 4769 scope.go:117] "RemoveContainer" containerID="9e0bc3e16231f9b53fb4ea1d60f9528601c7abd064da24494465904562243b24" Jan 31 17:18:26 crc kubenswrapper[4769]: I0131 17:18:26.016920 4769 scope.go:117] "RemoveContainer" 
containerID="3c8c682bf057b3f3cd818ea1598fbb8e267b962e0187e1fb9693415421d4042c" Jan 31 17:18:26 crc kubenswrapper[4769]: I0131 17:18:26.017138 4769 scope.go:117] "RemoveContainer" containerID="20329b69b54456b9125eb1ef9a1aeb204bed6414354d722188fd5d824c019f40" Jan 31 17:18:26 crc kubenswrapper[4769]: E0131 17:18:26.017355 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:18:26 crc kubenswrapper[4769]: I0131 17:18:26.045268 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerStarted","Data":"34043e4c4acd3bb4c77eb869798bcbcf46c6b144268550d7dd4bf295127b4033"} Jan 31 17:18:26 crc kubenswrapper[4769]: E0131 17:18:26.333165 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:18:26 crc kubenswrapper[4769]: I0131 17:18:26.465342 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-wx75k_3e62e4ba-8115-4140-b8de-07edd8c6fcfd/control-plane-machine-set-operator/0.log" Jan 31 17:18:26 crc kubenswrapper[4769]: I0131 17:18:26.578448 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-qt6ps_1ecd290f-188e-4ff0-a52f-6286412a0b5a/kube-rbac-proxy/0.log" Jan 31 17:18:26 crc kubenswrapper[4769]: I0131 17:18:26.611883 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-qt6ps_1ecd290f-188e-4ff0-a52f-6286412a0b5a/machine-api-operator/0.log" Jan 31 17:18:26 crc kubenswrapper[4769]: I0131 17:18:26.644603 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 17:18:26 crc kubenswrapper[4769]: I0131 17:18:26.710300 4769 scope.go:117] "RemoveContainer" containerID="93b9536f213a9a8ebff163e44fa064fea71fe72fd3236438bcb9d2c1082f94fd" Jan 31 17:18:26 crc kubenswrapper[4769]: I0131 17:18:26.710367 4769 scope.go:117] "RemoveContainer" containerID="4a808d9f2da7be4edf651d6d424731ea4ceaeed364b4417a065b21c22246d30c" Jan 31 17:18:26 crc kubenswrapper[4769]: I0131 17:18:26.710453 4769 scope.go:117] "RemoveContainer" containerID="1d248f00feafbc9afe8eb2636aa7b6e4ee44e68fbe01f11d313641cc945e74d3" Jan 31 17:18:26 crc kubenswrapper[4769]: E0131 17:18:26.710712 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for 
\"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:18:27 crc kubenswrapper[4769]: I0131 17:18:27.061755 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="31c5410734988a34733bf2816d43f274001a7710f20cd41dd446e56fc21fd125" exitCode=1 Jan 31 17:18:27 crc kubenswrapper[4769]: I0131 17:18:27.062070 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="34043e4c4acd3bb4c77eb869798bcbcf46c6b144268550d7dd4bf295127b4033" exitCode=1 Jan 31 17:18:27 crc kubenswrapper[4769]: I0131 17:18:27.062087 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="e371ba0abd96fed529279e8c987705bc2490becc412646d62670c62aab9f16e2" exitCode=1 Jan 31 17:18:27 crc kubenswrapper[4769]: I0131 17:18:27.062097 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="acd95b5565bae3340b8e173514ccf400cc7303c10cd705067a889467cb1bf9a2" exitCode=1 Jan 31 17:18:27 crc kubenswrapper[4769]: I0131 17:18:27.062536 4769 scope.go:117] "RemoveContainer" containerID="34043e4c4acd3bb4c77eb869798bcbcf46c6b144268550d7dd4bf295127b4033" Jan 31 17:18:27 crc kubenswrapper[4769]: I0131 17:18:27.062630 4769 scope.go:117] "RemoveContainer" containerID="31c5410734988a34733bf2816d43f274001a7710f20cd41dd446e56fc21fd125" Jan 31 17:18:27 crc kubenswrapper[4769]: I0131 17:18:27.062668 4769 scope.go:117] "RemoveContainer" containerID="85f1f039507b8b306801c6bb4ff81c81e24ef3782c3aa1af52cb2accc8cbf579" Jan 31 17:18:27 crc kubenswrapper[4769]: I0131 17:18:27.062751 4769 scope.go:117] "RemoveContainer" containerID="3c8c682bf057b3f3cd818ea1598fbb8e267b962e0187e1fb9693415421d4042c" Jan 31 17:18:27 crc kubenswrapper[4769]: I0131 17:18:27.062768 4769 scope.go:117] "RemoveContainer" containerID="20329b69b54456b9125eb1ef9a1aeb204bed6414354d722188fd5d824c019f40" Jan 31 17:18:27 crc kubenswrapper[4769]: E0131 17:18:27.063002 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:18:27 crc kubenswrapper[4769]: I0131 17:18:27.063027 4769 scope.go:117] "RemoveContainer" containerID="acd95b5565bae3340b8e173514ccf400cc7303c10cd705067a889467cb1bf9a2" Jan 31 17:18:27 crc kubenswrapper[4769]: I0131 17:18:27.063092 4769 scope.go:117] "RemoveContainer" containerID="e371ba0abd96fed529279e8c987705bc2490becc412646d62670c62aab9f16e2" Jan 31 17:18:27 crc kubenswrapper[4769]: I0131 17:18:27.063269 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"31c5410734988a34733bf2816d43f274001a7710f20cd41dd446e56fc21fd125"} Jan 31 17:18:27 crc kubenswrapper[4769]: I0131 17:18:27.063296 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"34043e4c4acd3bb4c77eb869798bcbcf46c6b144268550d7dd4bf295127b4033"} Jan 31 17:18:27 crc kubenswrapper[4769]: I0131 17:18:27.063306 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"e371ba0abd96fed529279e8c987705bc2490becc412646d62670c62aab9f16e2"} Jan 31 17:18:27 crc kubenswrapper[4769]: I0131 17:18:27.063316 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"acd95b5565bae3340b8e173514ccf400cc7303c10cd705067a889467cb1bf9a2"} Jan 31 17:18:27 crc kubenswrapper[4769]: I0131 17:18:27.063328 4769 scope.go:117] "RemoveContainer" containerID="9dbd9a0ad20346e4c64377f2a67c6289980b41414e80aaee45dc779649f3ac86" Jan 31 17:18:27 crc kubenswrapper[4769]: E0131 17:18:27.063801 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:18:27 crc kubenswrapper[4769]: I0131 17:18:27.101318 4769 scope.go:117] "RemoveContainer" containerID="c0adeff93092159437f56d3a16d5a487149aa20a20f6d0f1c321bb6c899e34c2" Jan 31 17:18:27 crc kubenswrapper[4769]: I0131 17:18:27.140635 4769 scope.go:117] "RemoveContainer" containerID="9e0bc3e16231f9b53fb4ea1d60f9528601c7abd064da24494465904562243b24" Jan 31 17:18:27 crc kubenswrapper[4769]: I0131 17:18:27.186561 4769 scope.go:117] "RemoveContainer" containerID="b5a2131b280435817997df4440d40e8bfe82db2f050869abe88ef581ee288329" Jan 31 17:18:28 crc kubenswrapper[4769]: I0131 17:18:28.075589 4769 scope.go:117] "RemoveContainer" containerID="34043e4c4acd3bb4c77eb869798bcbcf46c6b144268550d7dd4bf295127b4033" Jan 31 17:18:28 crc kubenswrapper[4769]: I0131 17:18:28.075655 4769 scope.go:117] "RemoveContainer" containerID="31c5410734988a34733bf2816d43f274001a7710f20cd41dd446e56fc21fd125" Jan 31 17:18:28 crc kubenswrapper[4769]: I0131 
17:18:28.075675 4769 scope.go:117] "RemoveContainer" containerID="85f1f039507b8b306801c6bb4ff81c81e24ef3782c3aa1af52cb2accc8cbf579" Jan 31 17:18:28 crc kubenswrapper[4769]: I0131 17:18:28.075729 4769 scope.go:117] "RemoveContainer" containerID="acd95b5565bae3340b8e173514ccf400cc7303c10cd705067a889467cb1bf9a2" Jan 31 17:18:28 crc kubenswrapper[4769]: I0131 17:18:28.075758 4769 scope.go:117] "RemoveContainer" containerID="e371ba0abd96fed529279e8c987705bc2490becc412646d62670c62aab9f16e2" Jan 31 17:18:28 crc kubenswrapper[4769]: E0131 17:18:28.076073 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:18:29 crc kubenswrapper[4769]: I0131 17:18:29.708462 4769 scope.go:117] "RemoveContainer" containerID="c6269a4105bb6ea59db61469209797c1723266af84a047a8fd15c8ff6b20fe7c" Jan 31 17:18:29 crc kubenswrapper[4769]: I0131 17:18:29.708629 4769 scope.go:117] "RemoveContainer" containerID="bc16d6c35d321e7f226f326ede73d4f7a8c9f8e22dfc09695519261eeb85d0b3" Jan 31 17:18:29 crc kubenswrapper[4769]: I0131 17:18:29.708742 4769 scope.go:117] "RemoveContainer" containerID="d4d0768906439206ffe8cc39e0b86534e43578159e313ff70f760a5885b2534a" Jan 31 17:18:29 crc kubenswrapper[4769]: E0131 17:18:29.709061 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:18:31 crc kubenswrapper[4769]: I0131 17:18:31.104725 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="c62e2433e6dd447fa3a7ea820441d82ce4079d9f21bdd74fac77729e6d1a9e47" exitCode=1 Jan 31 17:18:31 crc kubenswrapper[4769]: I0131 17:18:31.104776 4769 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"c62e2433e6dd447fa3a7ea820441d82ce4079d9f21bdd74fac77729e6d1a9e47"} Jan 31 17:18:31 crc kubenswrapper[4769]: I0131 17:18:31.104823 4769 scope.go:117] "RemoveContainer" containerID="993b7cfb583c2f3c03283a54e15111bfb34960d250a886a819b21427a34125aa" Jan 31 17:18:31 crc kubenswrapper[4769]: I0131 17:18:31.105717 4769 scope.go:117] "RemoveContainer" containerID="34043e4c4acd3bb4c77eb869798bcbcf46c6b144268550d7dd4bf295127b4033" Jan 31 17:18:31 crc kubenswrapper[4769]: I0131 17:18:31.105802 4769 scope.go:117] "RemoveContainer" containerID="31c5410734988a34733bf2816d43f274001a7710f20cd41dd446e56fc21fd125" Jan 31 17:18:31 crc kubenswrapper[4769]: I0131 17:18:31.105836 4769 scope.go:117] "RemoveContainer" containerID="85f1f039507b8b306801c6bb4ff81c81e24ef3782c3aa1af52cb2accc8cbf579" Jan 31 17:18:31 crc kubenswrapper[4769]: I0131 17:18:31.105906 4769 scope.go:117] "RemoveContainer" containerID="c62e2433e6dd447fa3a7ea820441d82ce4079d9f21bdd74fac77729e6d1a9e47" Jan 31 17:18:31 crc kubenswrapper[4769]: I0131 17:18:31.105934 4769 scope.go:117] "RemoveContainer" containerID="acd95b5565bae3340b8e173514ccf400cc7303c10cd705067a889467cb1bf9a2" Jan 31 17:18:31 crc kubenswrapper[4769]: I0131 17:18:31.105978 4769 scope.go:117] "RemoveContainer" containerID="e371ba0abd96fed529279e8c987705bc2490becc412646d62670c62aab9f16e2" Jan 31 17:18:31 crc kubenswrapper[4769]: E0131 17:18:31.106535 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:18:38 crc kubenswrapper[4769]: I0131 17:18:38.708424 4769 scope.go:117] "RemoveContainer" containerID="93b9536f213a9a8ebff163e44fa064fea71fe72fd3236438bcb9d2c1082f94fd" Jan 31 17:18:38 crc kubenswrapper[4769]: I0131 17:18:38.709141 4769 scope.go:117] "RemoveContainer" containerID="4a808d9f2da7be4edf651d6d424731ea4ceaeed364b4417a065b21c22246d30c" Jan 31 17:18:38 crc kubenswrapper[4769]: I0131 17:18:38.709322 4769 scope.go:117] "RemoveContainer" containerID="1d248f00feafbc9afe8eb2636aa7b6e4ee44e68fbe01f11d313641cc945e74d3" Jan 31 17:18:38 
crc kubenswrapper[4769]: E0131 17:18:38.709911 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:18:41 crc kubenswrapper[4769]: I0131 17:18:41.708789 4769 scope.go:117] "RemoveContainer" containerID="3c8c682bf057b3f3cd818ea1598fbb8e267b962e0187e1fb9693415421d4042c" Jan 31 17:18:41 crc kubenswrapper[4769]: I0131 17:18:41.708821 4769 scope.go:117] "RemoveContainer" containerID="20329b69b54456b9125eb1ef9a1aeb204bed6414354d722188fd5d824c019f40" Jan 31 17:18:41 crc kubenswrapper[4769]: E0131 17:18:41.709022 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:18:44 crc kubenswrapper[4769]: I0131 17:18:44.708541 4769 scope.go:117] "RemoveContainer" containerID="c6269a4105bb6ea59db61469209797c1723266af84a047a8fd15c8ff6b20fe7c" Jan 31 17:18:44 crc kubenswrapper[4769]: I0131 17:18:44.708638 4769 scope.go:117] "RemoveContainer" containerID="bc16d6c35d321e7f226f326ede73d4f7a8c9f8e22dfc09695519261eeb85d0b3" Jan 31 17:18:44 crc kubenswrapper[4769]: I0131 17:18:44.708726 4769 scope.go:117] "RemoveContainer" containerID="d4d0768906439206ffe8cc39e0b86534e43578159e313ff70f760a5885b2534a" Jan 31 17:18:44 crc kubenswrapper[4769]: E0131 17:18:44.708981 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:18:46 crc kubenswrapper[4769]: I0131 17:18:46.709583 4769 scope.go:117] "RemoveContainer" containerID="34043e4c4acd3bb4c77eb869798bcbcf46c6b144268550d7dd4bf295127b4033" Jan 31 17:18:46 crc kubenswrapper[4769]: I0131 17:18:46.709936 4769 scope.go:117] 
"RemoveContainer" containerID="31c5410734988a34733bf2816d43f274001a7710f20cd41dd446e56fc21fd125" Jan 31 17:18:46 crc kubenswrapper[4769]: I0131 17:18:46.709962 4769 scope.go:117] "RemoveContainer" containerID="85f1f039507b8b306801c6bb4ff81c81e24ef3782c3aa1af52cb2accc8cbf579" Jan 31 17:18:46 crc kubenswrapper[4769]: I0131 17:18:46.710018 4769 scope.go:117] "RemoveContainer" containerID="c62e2433e6dd447fa3a7ea820441d82ce4079d9f21bdd74fac77729e6d1a9e47" Jan 31 17:18:46 crc kubenswrapper[4769]: I0131 17:18:46.710026 4769 scope.go:117] "RemoveContainer" containerID="acd95b5565bae3340b8e173514ccf400cc7303c10cd705067a889467cb1bf9a2" Jan 31 17:18:46 crc kubenswrapper[4769]: I0131 17:18:46.710062 4769 scope.go:117] "RemoveContainer" containerID="e371ba0abd96fed529279e8c987705bc2490becc412646d62670c62aab9f16e2" Jan 31 17:18:46 crc kubenswrapper[4769]: E0131 17:18:46.710396 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:18:50 crc kubenswrapper[4769]: I0131 17:18:50.682052 4769 patch_prober.go:28] interesting pod/machine-config-daemon-4bqbm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 31 17:18:50 crc kubenswrapper[4769]: I0131 17:18:50.682370 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 31 17:18:50 crc kubenswrapper[4769]: I0131 17:18:50.709356 4769 scope.go:117] "RemoveContainer" containerID="93b9536f213a9a8ebff163e44fa064fea71fe72fd3236438bcb9d2c1082f94fd" Jan 31 17:18:50 crc kubenswrapper[4769]: I0131 17:18:50.709488 4769 scope.go:117] "RemoveContainer" containerID="4a808d9f2da7be4edf651d6d424731ea4ceaeed364b4417a065b21c22246d30c" Jan 31 17:18:50 crc kubenswrapper[4769]: I0131 17:18:50.709700 4769 scope.go:117] "RemoveContainer" 
containerID="1d248f00feafbc9afe8eb2636aa7b6e4ee44e68fbe01f11d313641cc945e74d3" Jan 31 17:18:50 crc kubenswrapper[4769]: E0131 17:18:50.710165 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:18:54 crc kubenswrapper[4769]: I0131 17:18:54.707847 4769 scope.go:117] "RemoveContainer" containerID="3c8c682bf057b3f3cd818ea1598fbb8e267b962e0187e1fb9693415421d4042c" Jan 31 17:18:54 crc kubenswrapper[4769]: I0131 17:18:54.708143 4769 scope.go:117] "RemoveContainer" containerID="20329b69b54456b9125eb1ef9a1aeb204bed6414354d722188fd5d824c019f40" Jan 31 17:18:54 crc kubenswrapper[4769]: E0131 17:18:54.708346 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:18:55 crc kubenswrapper[4769]: I0131 17:18:55.169034 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-tjsrv_7d9baf9c-e190-482c-85c3-adff9ee82cd7/kube-rbac-proxy/0.log" Jan 31 17:18:55 crc kubenswrapper[4769]: I0131 17:18:55.301790 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-tjsrv_7d9baf9c-e190-482c-85c3-adff9ee82cd7/controller/0.log" Jan 31 17:18:55 crc kubenswrapper[4769]: I0131 17:18:55.423555 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-t6zmk_42156100-4827-4e7a-9cce-ec90993228af/cp-frr-files/0.log" Jan 31 17:18:55 crc kubenswrapper[4769]: I0131 17:18:55.535846 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-t6zmk_42156100-4827-4e7a-9cce-ec90993228af/cp-frr-files/0.log" Jan 31 17:18:55 crc kubenswrapper[4769]: I0131 17:18:55.537748 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-t6zmk_42156100-4827-4e7a-9cce-ec90993228af/cp-reloader/0.log" Jan 31 17:18:55 crc kubenswrapper[4769]: I0131 17:18:55.569383 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-t6zmk_42156100-4827-4e7a-9cce-ec90993228af/cp-metrics/0.log" Jan 31 17:18:55 crc kubenswrapper[4769]: I0131 17:18:55.581364 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-t6zmk_42156100-4827-4e7a-9cce-ec90993228af/cp-reloader/0.log" Jan 31 17:18:55 crc kubenswrapper[4769]: I0131 17:18:55.809036 
4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-t6zmk_42156100-4827-4e7a-9cce-ec90993228af/cp-frr-files/0.log" Jan 31 17:18:55 crc kubenswrapper[4769]: I0131 17:18:55.815881 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-t6zmk_42156100-4827-4e7a-9cce-ec90993228af/cp-metrics/0.log" Jan 31 17:18:55 crc kubenswrapper[4769]: I0131 17:18:55.817237 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-t6zmk_42156100-4827-4e7a-9cce-ec90993228af/cp-reloader/0.log" Jan 31 17:18:55 crc kubenswrapper[4769]: I0131 17:18:55.840820 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-t6zmk_42156100-4827-4e7a-9cce-ec90993228af/cp-metrics/0.log" Jan 31 17:18:55 crc kubenswrapper[4769]: I0131 17:18:55.986761 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-t6zmk_42156100-4827-4e7a-9cce-ec90993228af/cp-frr-files/0.log" Jan 31 17:18:55 crc kubenswrapper[4769]: I0131 17:18:55.987096 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-t6zmk_42156100-4827-4e7a-9cce-ec90993228af/cp-metrics/0.log" Jan 31 17:18:55 crc kubenswrapper[4769]: I0131 17:18:55.999587 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-t6zmk_42156100-4827-4e7a-9cce-ec90993228af/cp-reloader/0.log" Jan 31 17:18:56 crc kubenswrapper[4769]: I0131 17:18:56.050510 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-t6zmk_42156100-4827-4e7a-9cce-ec90993228af/controller/0.log" Jan 31 17:18:56 crc kubenswrapper[4769]: I0131 17:18:56.150122 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-t6zmk_42156100-4827-4e7a-9cce-ec90993228af/frr-metrics/0.log" Jan 31 17:18:56 crc kubenswrapper[4769]: I0131 17:18:56.185228 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-t6zmk_42156100-4827-4e7a-9cce-ec90993228af/kube-rbac-proxy/0.log" Jan 31 17:18:56 crc kubenswrapper[4769]: I0131 17:18:56.285013 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-t6zmk_42156100-4827-4e7a-9cce-ec90993228af/kube-rbac-proxy-frr/0.log" Jan 31 17:18:56 crc kubenswrapper[4769]: I0131 17:18:56.349302 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-t6zmk_42156100-4827-4e7a-9cce-ec90993228af/reloader/0.log" Jan 31 17:18:56 crc kubenswrapper[4769]: I0131 17:18:56.498460 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7df86c4f6c-2jzj5_093fd348-a77b-4736-97a9-3fe20a0a63f9/frr-k8s-webhook-server/0.log" Jan 31 17:18:56 crc kubenswrapper[4769]: I0131 17:18:56.590363 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-t6zmk_42156100-4827-4e7a-9cce-ec90993228af/frr/0.log" Jan 31 17:18:56 crc kubenswrapper[4769]: I0131 17:18:56.664832 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-7b5894f8dd-87v5w_fe37bf4b-0671-44be-b390-3ba344ae8d71/manager/0.log" Jan 31 17:18:56 crc kubenswrapper[4769]: I0131 17:18:56.708597 4769 scope.go:117] "RemoveContainer" containerID="c6269a4105bb6ea59db61469209797c1723266af84a047a8fd15c8ff6b20fe7c" Jan 31 17:18:56 crc kubenswrapper[4769]: I0131 17:18:56.708668 4769 scope.go:117] "RemoveContainer" containerID="bc16d6c35d321e7f226f326ede73d4f7a8c9f8e22dfc09695519261eeb85d0b3" Jan 31 
17:18:56 crc kubenswrapper[4769]: I0131 17:18:56.708756 4769 scope.go:117] "RemoveContainer" containerID="d4d0768906439206ffe8cc39e0b86534e43578159e313ff70f760a5885b2534a" Jan 31 17:18:56 crc kubenswrapper[4769]: E0131 17:18:56.709027 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:18:56 crc kubenswrapper[4769]: I0131 17:18:56.733426 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-b797d4985-n2hhh_b465a277-1e9b-4534-93b1-aa164b30d12f/webhook-server/0.log" Jan 31 17:18:56 crc kubenswrapper[4769]: I0131 17:18:56.871853 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-txm8b_1c91ccf4-70f7-4203-be06-eeaa0bd439ba/kube-rbac-proxy/0.log" Jan 31 17:18:56 crc kubenswrapper[4769]: I0131 17:18:56.945036 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-txm8b_1c91ccf4-70f7-4203-be06-eeaa0bd439ba/speaker/0.log" Jan 31 17:18:57 crc kubenswrapper[4769]: I0131 17:18:57.708320 4769 scope.go:117] "RemoveContainer" containerID="34043e4c4acd3bb4c77eb869798bcbcf46c6b144268550d7dd4bf295127b4033" Jan 31 17:18:57 crc kubenswrapper[4769]: I0131 17:18:57.708389 4769 scope.go:117] "RemoveContainer" containerID="31c5410734988a34733bf2816d43f274001a7710f20cd41dd446e56fc21fd125" Jan 31 17:18:57 crc kubenswrapper[4769]: I0131 17:18:57.708413 4769 scope.go:117] "RemoveContainer" containerID="85f1f039507b8b306801c6bb4ff81c81e24ef3782c3aa1af52cb2accc8cbf579" Jan 31 17:18:57 crc kubenswrapper[4769]: I0131 17:18:57.708460 4769 scope.go:117] "RemoveContainer" containerID="c62e2433e6dd447fa3a7ea820441d82ce4079d9f21bdd74fac77729e6d1a9e47" Jan 31 17:18:57 crc kubenswrapper[4769]: I0131 17:18:57.708467 4769 scope.go:117] "RemoveContainer" containerID="acd95b5565bae3340b8e173514ccf400cc7303c10cd705067a889467cb1bf9a2" Jan 31 17:18:57 crc kubenswrapper[4769]: I0131 17:18:57.708520 4769 scope.go:117] "RemoveContainer" containerID="e371ba0abd96fed529279e8c987705bc2490becc412646d62670c62aab9f16e2" Jan 31 17:18:57 crc kubenswrapper[4769]: E0131 17:18:57.708845 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-updater 
pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:19:01 crc kubenswrapper[4769]: I0131 17:19:01.708651 4769 scope.go:117] "RemoveContainer" containerID="93b9536f213a9a8ebff163e44fa064fea71fe72fd3236438bcb9d2c1082f94fd" Jan 31 17:19:01 crc kubenswrapper[4769]: I0131 17:19:01.708968 4769 scope.go:117] "RemoveContainer" containerID="4a808d9f2da7be4edf651d6d424731ea4ceaeed364b4417a065b21c22246d30c" Jan 31 17:19:01 crc kubenswrapper[4769]: I0131 17:19:01.709052 4769 scope.go:117] "RemoveContainer" containerID="1d248f00feafbc9afe8eb2636aa7b6e4ee44e68fbe01f11d313641cc945e74d3" Jan 31 17:19:01 crc kubenswrapper[4769]: E0131 17:19:01.709308 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:19:07 crc kubenswrapper[4769]: I0131 17:19:07.708120 4769 scope.go:117] "RemoveContainer" containerID="3c8c682bf057b3f3cd818ea1598fbb8e267b962e0187e1fb9693415421d4042c" Jan 31 17:19:07 crc kubenswrapper[4769]: I0131 17:19:07.709791 4769 scope.go:117] "RemoveContainer" containerID="20329b69b54456b9125eb1ef9a1aeb204bed6414354d722188fd5d824c019f40" Jan 31 17:19:07 crc kubenswrapper[4769]: E0131 17:19:07.710184 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:19:09 crc kubenswrapper[4769]: I0131 17:19:09.708998 4769 scope.go:117] "RemoveContainer" containerID="c6269a4105bb6ea59db61469209797c1723266af84a047a8fd15c8ff6b20fe7c" Jan 31 17:19:09 crc kubenswrapper[4769]: I0131 17:19:09.709422 4769 scope.go:117] "RemoveContainer" 
containerID="bc16d6c35d321e7f226f326ede73d4f7a8c9f8e22dfc09695519261eeb85d0b3" Jan 31 17:19:09 crc kubenswrapper[4769]: I0131 17:19:09.709635 4769 scope.go:117] "RemoveContainer" containerID="d4d0768906439206ffe8cc39e0b86534e43578159e313ff70f760a5885b2534a" Jan 31 17:19:09 crc kubenswrapper[4769]: E0131 17:19:09.710157 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:19:11 crc kubenswrapper[4769]: I0131 17:19:11.039232 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_barbican-api-5657759ccd-46xb5_9ee526a1-93ff-433e-85e8-ec37678faa35/barbican-api-log/0.log" Jan 31 17:19:11 crc kubenswrapper[4769]: I0131 17:19:11.049427 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_barbican-api-5657759ccd-46xb5_9ee526a1-93ff-433e-85e8-ec37678faa35/barbican-api/0.log" Jan 31 17:19:11 crc kubenswrapper[4769]: I0131 17:19:11.231249 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_barbican-db-sync-zzwxm_7dbe76af-a39e-4a34-ae20-cc96a79e6b6c/barbican-db-sync/0.log" Jan 31 17:19:11 crc kubenswrapper[4769]: I0131 17:19:11.301439 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_barbican-keystone-listener-645f4bcb9b-jctzj_04fee216-053a-4105-b59f-edca6bd15bdb/barbican-keystone-listener/0.log" Jan 31 17:19:11 crc kubenswrapper[4769]: I0131 17:19:11.360976 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_barbican-keystone-listener-645f4bcb9b-jctzj_04fee216-053a-4105-b59f-edca6bd15bdb/barbican-keystone-listener-log/0.log" Jan 31 17:19:11 crc kubenswrapper[4769]: I0131 17:19:11.412807 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_barbican-worker-7dddbdf7d9-wrhnh_ef94300f-9ac8-484f-8462-00ad88314f4a/barbican-worker/0.log" Jan 31 17:19:11 crc kubenswrapper[4769]: I0131 17:19:11.484536 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_barbican-worker-7dddbdf7d9-wrhnh_ef94300f-9ac8-484f-8462-00ad88314f4a/barbican-worker-log/0.log" Jan 31 17:19:11 crc kubenswrapper[4769]: I0131 17:19:11.685348 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_keystone-c9ff7c785-ktbq4_8b05a639-8fd8-4f42-97e8-e946783ae05d/keystone-api/0.log" Jan 31 17:19:11 crc kubenswrapper[4769]: I0131 17:19:11.697227 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_keystone-cron-29497981-l6wdw_58e62f4c-9b5c-49e1-a5b4-bde6e98c763e/keystone-cron/0.log" Jan 31 17:19:11 crc kubenswrapper[4769]: I0131 17:19:11.708534 4769 scope.go:117] "RemoveContainer" containerID="34043e4c4acd3bb4c77eb869798bcbcf46c6b144268550d7dd4bf295127b4033" Jan 31 17:19:11 crc kubenswrapper[4769]: I0131 17:19:11.708671 4769 scope.go:117] 
"RemoveContainer" containerID="31c5410734988a34733bf2816d43f274001a7710f20cd41dd446e56fc21fd125" Jan 31 17:19:11 crc kubenswrapper[4769]: I0131 17:19:11.708760 4769 scope.go:117] "RemoveContainer" containerID="85f1f039507b8b306801c6bb4ff81c81e24ef3782c3aa1af52cb2accc8cbf579" Jan 31 17:19:11 crc kubenswrapper[4769]: I0131 17:19:11.708862 4769 scope.go:117] "RemoveContainer" containerID="c62e2433e6dd447fa3a7ea820441d82ce4079d9f21bdd74fac77729e6d1a9e47" Jan 31 17:19:11 crc kubenswrapper[4769]: I0131 17:19:11.708921 4769 scope.go:117] "RemoveContainer" containerID="acd95b5565bae3340b8e173514ccf400cc7303c10cd705067a889467cb1bf9a2" Jan 31 17:19:11 crc kubenswrapper[4769]: I0131 17:19:11.709001 4769 scope.go:117] "RemoveContainer" containerID="e371ba0abd96fed529279e8c987705bc2490becc412646d62670c62aab9f16e2" Jan 31 17:19:11 crc kubenswrapper[4769]: E0131 17:19:11.709350 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:19:11 crc kubenswrapper[4769]: I0131 17:19:11.878971 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_openstack-galera-0_4215cc6b-6982-4db5-bec8-2cf774a3cd59/mysql-bootstrap/0.log" Jan 31 17:19:12 crc kubenswrapper[4769]: I0131 17:19:12.095455 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_openstack-galera-0_4215cc6b-6982-4db5-bec8-2cf774a3cd59/mysql-bootstrap/0.log" Jan 31 17:19:12 crc kubenswrapper[4769]: I0131 17:19:12.097253 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_openstack-galera-0_4215cc6b-6982-4db5-bec8-2cf774a3cd59/galera/0.log" Jan 31 17:19:12 crc kubenswrapper[4769]: I0131 17:19:12.178056 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_memcached-0_99d531b0-8a9a-4568-801f-d4423fd63af5/memcached/0.log" Jan 31 17:19:12 crc kubenswrapper[4769]: I0131 17:19:12.296037 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_openstack-galera-1_322a07d1-dd08-4c79-a511-bab71e44d9e9/mysql-bootstrap/0.log" Jan 31 17:19:12 crc kubenswrapper[4769]: I0131 17:19:12.471450 4769 log.go:25] "Finished parsing log file" 
path="/var/log/pods/swift-kuttl-tests_openstack-galera-1_322a07d1-dd08-4c79-a511-bab71e44d9e9/mysql-bootstrap/0.log" Jan 31 17:19:12 crc kubenswrapper[4769]: I0131 17:19:12.479123 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_openstack-galera-1_322a07d1-dd08-4c79-a511-bab71e44d9e9/galera/0.log" Jan 31 17:19:12 crc kubenswrapper[4769]: I0131 17:19:12.495611 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_openstack-galera-2_cc26650a-e98c-4c58-bfea-16ebfc50a1a1/mysql-bootstrap/0.log" Jan 31 17:19:12 crc kubenswrapper[4769]: I0131 17:19:12.634985 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_openstack-galera-2_cc26650a-e98c-4c58-bfea-16ebfc50a1a1/mysql-bootstrap/0.log" Jan 31 17:19:12 crc kubenswrapper[4769]: I0131 17:19:12.677995 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_rabbitmq-server-0_99300305-94b5-426d-a930-a4420fc775d7/setup-container/0.log" Jan 31 17:19:12 crc kubenswrapper[4769]: I0131 17:19:12.694813 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_openstack-galera-2_cc26650a-e98c-4c58-bfea-16ebfc50a1a1/galera/0.log" Jan 31 17:19:12 crc kubenswrapper[4769]: I0131 17:19:12.848433 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-proxy-7d8cf99555-gcpxn_fb764692-fbb8-4fb4-860c-2cd0e0cfd452/proxy-httpd/15.log" Jan 31 17:19:12 crc kubenswrapper[4769]: I0131 17:19:12.893152 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_rabbitmq-server-0_99300305-94b5-426d-a930-a4420fc775d7/rabbitmq/0.log" Jan 31 17:19:12 crc kubenswrapper[4769]: I0131 17:19:12.926058 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_rabbitmq-server-0_99300305-94b5-426d-a930-a4420fc775d7/setup-container/0.log" Jan 31 17:19:13 crc kubenswrapper[4769]: I0131 17:19:13.056962 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-proxy-7d8cf99555-gcpxn_fb764692-fbb8-4fb4-860c-2cd0e0cfd452/proxy-httpd/15.log" Jan 31 17:19:13 crc kubenswrapper[4769]: I0131 17:19:13.084227 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-proxy-7d8cf99555-gcpxn_fb764692-fbb8-4fb4-860c-2cd0e0cfd452/proxy-server/11.log" Jan 31 17:19:13 crc kubenswrapper[4769]: I0131 17:19:13.090386 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-proxy-7d8cf99555-gcpxn_fb764692-fbb8-4fb4-860c-2cd0e0cfd452/proxy-server/11.log" Jan 31 17:19:13 crc kubenswrapper[4769]: I0131 17:19:13.267593 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-0_13aa61f9-8314-4571-afce-8c24594fa917/account-reaper/0.log" Jan 31 17:19:13 crc kubenswrapper[4769]: I0131 17:19:13.285559 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-0_13aa61f9-8314-4571-afce-8c24594fa917/account-auditor/0.log" Jan 31 17:19:13 crc kubenswrapper[4769]: I0131 17:19:13.412779 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-0_13aa61f9-8314-4571-afce-8c24594fa917/account-replicator/9.log" Jan 31 17:19:13 crc kubenswrapper[4769]: I0131 17:19:13.422532 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-0_13aa61f9-8314-4571-afce-8c24594fa917/account-replicator/9.log" Jan 31 17:19:13 crc kubenswrapper[4769]: I0131 17:19:13.483981 4769 log.go:25] 
"Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-0_13aa61f9-8314-4571-afce-8c24594fa917/account-server/0.log" Jan 31 17:19:13 crc kubenswrapper[4769]: I0131 17:19:13.491871 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-0_13aa61f9-8314-4571-afce-8c24594fa917/container-auditor/0.log" Jan 31 17:19:13 crc kubenswrapper[4769]: I0131 17:19:13.597314 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-0_13aa61f9-8314-4571-afce-8c24594fa917/container-replicator/9.log" Jan 31 17:19:13 crc kubenswrapper[4769]: I0131 17:19:13.632389 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-0_13aa61f9-8314-4571-afce-8c24594fa917/container-replicator/9.log" Jan 31 17:19:13 crc kubenswrapper[4769]: I0131 17:19:13.636268 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-0_13aa61f9-8314-4571-afce-8c24594fa917/container-sharder/9.log" Jan 31 17:19:13 crc kubenswrapper[4769]: I0131 17:19:13.775000 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-0_13aa61f9-8314-4571-afce-8c24594fa917/container-server/0.log" Jan 31 17:19:13 crc kubenswrapper[4769]: I0131 17:19:13.893726 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-0_13aa61f9-8314-4571-afce-8c24594fa917/container-sharder/9.log" Jan 31 17:19:13 crc kubenswrapper[4769]: I0131 17:19:13.903608 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-0_13aa61f9-8314-4571-afce-8c24594fa917/container-updater/5.log" Jan 31 17:19:13 crc kubenswrapper[4769]: I0131 17:19:13.930848 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-0_13aa61f9-8314-4571-afce-8c24594fa917/container-updater/5.log" Jan 31 17:19:13 crc kubenswrapper[4769]: I0131 17:19:13.945137 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-0_13aa61f9-8314-4571-afce-8c24594fa917/object-auditor/0.log" Jan 31 17:19:14 crc kubenswrapper[4769]: I0131 17:19:14.062644 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-0_13aa61f9-8314-4571-afce-8c24594fa917/object-expirer/9.log" Jan 31 17:19:14 crc kubenswrapper[4769]: I0131 17:19:14.089001 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-0_13aa61f9-8314-4571-afce-8c24594fa917/object-server/0.log" Jan 31 17:19:14 crc kubenswrapper[4769]: I0131 17:19:14.112406 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-0_13aa61f9-8314-4571-afce-8c24594fa917/object-replicator/0.log" Jan 31 17:19:14 crc kubenswrapper[4769]: I0131 17:19:14.131312 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-0_13aa61f9-8314-4571-afce-8c24594fa917/object-expirer/9.log" Jan 31 17:19:14 crc kubenswrapper[4769]: I0131 17:19:14.207875 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-0_13aa61f9-8314-4571-afce-8c24594fa917/object-updater/5.log" Jan 31 17:19:14 crc kubenswrapper[4769]: I0131 17:19:14.283245 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-0_13aa61f9-8314-4571-afce-8c24594fa917/object-updater/5.log" Jan 31 17:19:14 crc kubenswrapper[4769]: I0131 17:19:14.305351 4769 log.go:25] 
"Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-0_13aa61f9-8314-4571-afce-8c24594fa917/rsync/0.log" Jan 31 17:19:14 crc kubenswrapper[4769]: I0131 17:19:14.311384 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-0_13aa61f9-8314-4571-afce-8c24594fa917/swift-recon-cron/0.log" Jan 31 17:19:14 crc kubenswrapper[4769]: I0131 17:19:14.430529 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-1_a2e45b15-42ba-44b4-91c5-fa5bc64d7a43/account-auditor/0.log" Jan 31 17:19:14 crc kubenswrapper[4769]: I0131 17:19:14.468418 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-1_a2e45b15-42ba-44b4-91c5-fa5bc64d7a43/account-reaper/0.log" Jan 31 17:19:14 crc kubenswrapper[4769]: I0131 17:19:14.476951 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-1_a2e45b15-42ba-44b4-91c5-fa5bc64d7a43/account-replicator/7.log" Jan 31 17:19:14 crc kubenswrapper[4769]: I0131 17:19:14.482179 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-1_a2e45b15-42ba-44b4-91c5-fa5bc64d7a43/account-replicator/7.log" Jan 31 17:19:14 crc kubenswrapper[4769]: I0131 17:19:14.577453 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-1_a2e45b15-42ba-44b4-91c5-fa5bc64d7a43/account-server/0.log" Jan 31 17:19:14 crc kubenswrapper[4769]: I0131 17:19:14.607410 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-1_a2e45b15-42ba-44b4-91c5-fa5bc64d7a43/container-auditor/0.log" Jan 31 17:19:14 crc kubenswrapper[4769]: I0131 17:19:14.625365 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-1_a2e45b15-42ba-44b4-91c5-fa5bc64d7a43/container-replicator/7.log" Jan 31 17:19:14 crc kubenswrapper[4769]: I0131 17:19:14.655126 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-1_a2e45b15-42ba-44b4-91c5-fa5bc64d7a43/container-replicator/7.log" Jan 31 17:19:14 crc kubenswrapper[4769]: I0131 17:19:14.658224 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-1_a2e45b15-42ba-44b4-91c5-fa5bc64d7a43/container-server/0.log" Jan 31 17:19:14 crc kubenswrapper[4769]: I0131 17:19:14.782865 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-1_a2e45b15-42ba-44b4-91c5-fa5bc64d7a43/object-auditor/0.log" Jan 31 17:19:14 crc kubenswrapper[4769]: I0131 17:19:14.798139 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-1_a2e45b15-42ba-44b4-91c5-fa5bc64d7a43/container-updater/3.log" Jan 31 17:19:14 crc kubenswrapper[4769]: I0131 17:19:14.799982 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-1_a2e45b15-42ba-44b4-91c5-fa5bc64d7a43/container-updater/4.log" Jan 31 17:19:14 crc kubenswrapper[4769]: I0131 17:19:14.830987 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-1_a2e45b15-42ba-44b4-91c5-fa5bc64d7a43/object-expirer/7.log" Jan 31 17:19:14 crc kubenswrapper[4769]: I0131 17:19:14.836068 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-1_a2e45b15-42ba-44b4-91c5-fa5bc64d7a43/object-expirer/7.log" Jan 31 17:19:14 crc kubenswrapper[4769]: I0131 17:19:14.949413 4769 log.go:25] "Finished 
parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-1_a2e45b15-42ba-44b4-91c5-fa5bc64d7a43/object-replicator/0.log" Jan 31 17:19:14 crc kubenswrapper[4769]: I0131 17:19:14.955122 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-1_a2e45b15-42ba-44b4-91c5-fa5bc64d7a43/object-server/0.log" Jan 31 17:19:14 crc kubenswrapper[4769]: I0131 17:19:14.972449 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-1_a2e45b15-42ba-44b4-91c5-fa5bc64d7a43/object-updater/3.log" Jan 31 17:19:15 crc kubenswrapper[4769]: I0131 17:19:15.013363 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-1_a2e45b15-42ba-44b4-91c5-fa5bc64d7a43/rsync/0.log" Jan 31 17:19:15 crc kubenswrapper[4769]: I0131 17:19:15.013542 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-1_a2e45b15-42ba-44b4-91c5-fa5bc64d7a43/object-updater/2.log" Jan 31 17:19:15 crc kubenswrapper[4769]: I0131 17:19:15.112789 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-1_a2e45b15-42ba-44b4-91c5-fa5bc64d7a43/swift-recon-cron/0.log" Jan 31 17:19:15 crc kubenswrapper[4769]: I0131 17:19:15.128607 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-2_fd794dbe-f3dd-4a87-8b3f-612f46a05b2b/account-auditor/0.log" Jan 31 17:19:15 crc kubenswrapper[4769]: I0131 17:19:15.209104 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-2_fd794dbe-f3dd-4a87-8b3f-612f46a05b2b/account-replicator/7.log" Jan 31 17:19:15 crc kubenswrapper[4769]: I0131 17:19:15.223923 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-2_fd794dbe-f3dd-4a87-8b3f-612f46a05b2b/account-reaper/0.log" Jan 31 17:19:15 crc kubenswrapper[4769]: I0131 17:19:15.295126 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-2_fd794dbe-f3dd-4a87-8b3f-612f46a05b2b/container-auditor/0.log" Jan 31 17:19:15 crc kubenswrapper[4769]: I0131 17:19:15.295975 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-2_fd794dbe-f3dd-4a87-8b3f-612f46a05b2b/account-server/0.log" Jan 31 17:19:15 crc kubenswrapper[4769]: I0131 17:19:15.297717 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-2_fd794dbe-f3dd-4a87-8b3f-612f46a05b2b/account-replicator/7.log" Jan 31 17:19:15 crc kubenswrapper[4769]: I0131 17:19:15.370921 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-2_fd794dbe-f3dd-4a87-8b3f-612f46a05b2b/container-replicator/7.log" Jan 31 17:19:15 crc kubenswrapper[4769]: I0131 17:19:15.384936 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-2_fd794dbe-f3dd-4a87-8b3f-612f46a05b2b/container-replicator/7.log" Jan 31 17:19:15 crc kubenswrapper[4769]: I0131 17:19:15.434316 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-2_fd794dbe-f3dd-4a87-8b3f-612f46a05b2b/container-updater/2.log" Jan 31 17:19:15 crc kubenswrapper[4769]: I0131 17:19:15.449381 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-2_fd794dbe-f3dd-4a87-8b3f-612f46a05b2b/container-updater/3.log" Jan 31 17:19:15 crc kubenswrapper[4769]: I0131 17:19:15.479011 4769 log.go:25] "Finished parsing 
log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-2_fd794dbe-f3dd-4a87-8b3f-612f46a05b2b/container-server/0.log" Jan 31 17:19:15 crc kubenswrapper[4769]: I0131 17:19:15.515197 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-2_fd794dbe-f3dd-4a87-8b3f-612f46a05b2b/object-auditor/0.log" Jan 31 17:19:15 crc kubenswrapper[4769]: I0131 17:19:15.578453 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-2_fd794dbe-f3dd-4a87-8b3f-612f46a05b2b/object-expirer/7.log" Jan 31 17:19:15 crc kubenswrapper[4769]: I0131 17:19:15.586409 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-2_fd794dbe-f3dd-4a87-8b3f-612f46a05b2b/object-expirer/7.log" Jan 31 17:19:15 crc kubenswrapper[4769]: I0131 17:19:15.621581 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-2_fd794dbe-f3dd-4a87-8b3f-612f46a05b2b/object-server/0.log" Jan 31 17:19:15 crc kubenswrapper[4769]: I0131 17:19:15.648326 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-2_fd794dbe-f3dd-4a87-8b3f-612f46a05b2b/object-replicator/0.log" Jan 31 17:19:15 crc kubenswrapper[4769]: I0131 17:19:15.689113 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-2_fd794dbe-f3dd-4a87-8b3f-612f46a05b2b/object-updater/2.log" Jan 31 17:19:15 crc kubenswrapper[4769]: I0131 17:19:15.707740 4769 scope.go:117] "RemoveContainer" containerID="93b9536f213a9a8ebff163e44fa064fea71fe72fd3236438bcb9d2c1082f94fd" Jan 31 17:19:15 crc kubenswrapper[4769]: I0131 17:19:15.707808 4769 scope.go:117] "RemoveContainer" containerID="4a808d9f2da7be4edf651d6d424731ea4ceaeed364b4417a065b21c22246d30c" Jan 31 17:19:15 crc kubenswrapper[4769]: I0131 17:19:15.707919 4769 scope.go:117] "RemoveContainer" containerID="1d248f00feafbc9afe8eb2636aa7b6e4ee44e68fbe01f11d313641cc945e74d3" Jan 31 17:19:15 crc kubenswrapper[4769]: E0131 17:19:15.708162 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:19:15 crc kubenswrapper[4769]: I0131 17:19:15.736800 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-2_fd794dbe-f3dd-4a87-8b3f-612f46a05b2b/rsync/0.log" Jan 31 17:19:15 crc kubenswrapper[4769]: I0131 17:19:15.771692 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-2_fd794dbe-f3dd-4a87-8b3f-612f46a05b2b/object-updater/1.log" Jan 31 17:19:15 crc kubenswrapper[4769]: I0131 17:19:15.824173 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/swift-kuttl-tests_swift-storage-2_fd794dbe-f3dd-4a87-8b3f-612f46a05b2b/swift-recon-cron/0.log" Jan 31 17:19:20 crc kubenswrapper[4769]: I0131 
17:19:20.681966 4769 patch_prober.go:28] interesting pod/machine-config-daemon-4bqbm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 31 17:19:20 crc kubenswrapper[4769]: I0131 17:19:20.682303 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 31 17:19:20 crc kubenswrapper[4769]: I0131 17:19:20.682380 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" Jan 31 17:19:20 crc kubenswrapper[4769]: I0131 17:19:20.683116 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4458fcd40801fe3787795388074fefeaa434873f4716c2298183466301cf956f"} pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 31 17:19:20 crc kubenswrapper[4769]: I0131 17:19:20.683188 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerName="machine-config-daemon" containerID="cri-o://4458fcd40801fe3787795388074fefeaa434873f4716c2298183466301cf956f" gracePeriod=600 Jan 31 17:19:20 crc kubenswrapper[4769]: E0131 17:19:20.810602 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:19:21 crc kubenswrapper[4769]: I0131 17:19:21.539030 4769 generic.go:334] "Generic (PLEG): container finished" podID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" containerID="62b924cb2aedb0674029c40a405b05ea5a55bce2923841575330407035bc1267" exitCode=1 Jan 31 17:19:21 crc kubenswrapper[4769]: I0131 17:19:21.539082 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerDied","Data":"62b924cb2aedb0674029c40a405b05ea5a55bce2923841575330407035bc1267"} Jan 31 17:19:21 crc kubenswrapper[4769]: I0131 17:19:21.539429 4769 scope.go:117] "RemoveContainer" containerID="70253717258a7c161800c42af8c4f4bd84e9abe68bfbd78aad2db3dae3a77cba" Jan 31 17:19:21 crc kubenswrapper[4769]: I0131 17:19:21.540232 4769 scope.go:117] "RemoveContainer" containerID="93b9536f213a9a8ebff163e44fa064fea71fe72fd3236438bcb9d2c1082f94fd" Jan 31 17:19:21 crc kubenswrapper[4769]: I0131 17:19:21.540294 4769 scope.go:117] "RemoveContainer" containerID="4a808d9f2da7be4edf651d6d424731ea4ceaeed364b4417a065b21c22246d30c" Jan 31 17:19:21 crc kubenswrapper[4769]: I0131 17:19:21.540363 4769 scope.go:117] "RemoveContainer" containerID="62b924cb2aedb0674029c40a405b05ea5a55bce2923841575330407035bc1267" Jan 31 17:19:21 crc kubenswrapper[4769]: I0131 
17:19:21.540385 4769 scope.go:117] "RemoveContainer" containerID="1d248f00feafbc9afe8eb2636aa7b6e4ee44e68fbe01f11d313641cc945e74d3" Jan 31 17:19:21 crc kubenswrapper[4769]: E0131 17:19:21.540715 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 20s restarting failed container=object-updater pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:19:21 crc kubenswrapper[4769]: I0131 17:19:21.549302 4769 generic.go:334] "Generic (PLEG): container finished" podID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" containerID="4458fcd40801fe3787795388074fefeaa434873f4716c2298183466301cf956f" exitCode=0 Jan 31 17:19:21 crc kubenswrapper[4769]: I0131 17:19:21.549360 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" event={"ID":"1d352f75-43f7-4b8c-867e-cfb17bbbe011","Type":"ContainerDied","Data":"4458fcd40801fe3787795388074fefeaa434873f4716c2298183466301cf956f"} Jan 31 17:19:21 crc kubenswrapper[4769]: I0131 17:19:21.549877 4769 scope.go:117] "RemoveContainer" containerID="4458fcd40801fe3787795388074fefeaa434873f4716c2298183466301cf956f" Jan 31 17:19:21 crc kubenswrapper[4769]: E0131 17:19:21.550070 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:19:21 crc kubenswrapper[4769]: I0131 17:19:21.594270 4769 scope.go:117] "RemoveContainer" containerID="6e828f88c834e15dfb7cfff730fff4effd93e0ba9e72273ac0004887e05469f3" Jan 31 17:19:21 crc kubenswrapper[4769]: I0131 17:19:21.708326 4769 scope.go:117] "RemoveContainer" containerID="c6269a4105bb6ea59db61469209797c1723266af84a047a8fd15c8ff6b20fe7c" Jan 31 17:19:21 crc kubenswrapper[4769]: I0131 17:19:21.708413 4769 scope.go:117] "RemoveContainer" containerID="bc16d6c35d321e7f226f326ede73d4f7a8c9f8e22dfc09695519261eeb85d0b3" Jan 31 17:19:21 crc kubenswrapper[4769]: I0131 17:19:21.708617 4769 scope.go:117] "RemoveContainer" containerID="d4d0768906439206ffe8cc39e0b86534e43578159e313ff70f760a5885b2534a" Jan 31 17:19:21 crc kubenswrapper[4769]: E0131 17:19:21.708954 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to 
\"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:19:22 crc kubenswrapper[4769]: I0131 17:19:22.711900 4769 scope.go:117] "RemoveContainer" containerID="3c8c682bf057b3f3cd818ea1598fbb8e267b962e0187e1fb9693415421d4042c" Jan 31 17:19:22 crc kubenswrapper[4769]: I0131 17:19:22.712776 4769 scope.go:117] "RemoveContainer" containerID="20329b69b54456b9125eb1ef9a1aeb204bed6414354d722188fd5d824c019f40" Jan 31 17:19:22 crc kubenswrapper[4769]: E0131 17:19:22.713104 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:19:25 crc kubenswrapper[4769]: I0131 17:19:25.709059 4769 scope.go:117] "RemoveContainer" containerID="34043e4c4acd3bb4c77eb869798bcbcf46c6b144268550d7dd4bf295127b4033" Jan 31 17:19:25 crc kubenswrapper[4769]: I0131 17:19:25.709146 4769 scope.go:117] "RemoveContainer" containerID="31c5410734988a34733bf2816d43f274001a7710f20cd41dd446e56fc21fd125" Jan 31 17:19:25 crc kubenswrapper[4769]: I0131 17:19:25.709173 4769 scope.go:117] "RemoveContainer" containerID="85f1f039507b8b306801c6bb4ff81c81e24ef3782c3aa1af52cb2accc8cbf579" Jan 31 17:19:25 crc kubenswrapper[4769]: I0131 17:19:25.709217 4769 scope.go:117] "RemoveContainer" containerID="c62e2433e6dd447fa3a7ea820441d82ce4079d9f21bdd74fac77729e6d1a9e47" Jan 31 17:19:25 crc kubenswrapper[4769]: I0131 17:19:25.709225 4769 scope.go:117] "RemoveContainer" containerID="acd95b5565bae3340b8e173514ccf400cc7303c10cd705067a889467cb1bf9a2" Jan 31 17:19:25 crc kubenswrapper[4769]: I0131 17:19:25.709256 4769 scope.go:117] "RemoveContainer" containerID="e371ba0abd96fed529279e8c987705bc2490becc412646d62670c62aab9f16e2" Jan 31 17:19:25 crc kubenswrapper[4769]: E0131 17:19:25.709587 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-updater 
pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:19:27 crc kubenswrapper[4769]: I0131 17:19:27.615347 4769 generic.go:334] "Generic (PLEG): container finished" podID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" containerID="682a8e121d8f06e40ccdf4a6fab0fb4c2740c871706d022858131ee2ec27a66b" exitCode=1 Jan 31 17:19:27 crc kubenswrapper[4769]: I0131 17:19:27.615415 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerDied","Data":"682a8e121d8f06e40ccdf4a6fab0fb4c2740c871706d022858131ee2ec27a66b"} Jan 31 17:19:27 crc kubenswrapper[4769]: I0131 17:19:27.615462 4769 scope.go:117] "RemoveContainer" containerID="0e5e5a5d761db04efdc8a34375d7448ec177789e0842ab95390ff58a38850fa6" Jan 31 17:19:27 crc kubenswrapper[4769]: I0131 17:19:27.616549 4769 scope.go:117] "RemoveContainer" containerID="c6269a4105bb6ea59db61469209797c1723266af84a047a8fd15c8ff6b20fe7c" Jan 31 17:19:27 crc kubenswrapper[4769]: I0131 17:19:27.616667 4769 scope.go:117] "RemoveContainer" containerID="bc16d6c35d321e7f226f326ede73d4f7a8c9f8e22dfc09695519261eeb85d0b3" Jan 31 17:19:27 crc kubenswrapper[4769]: I0131 17:19:27.616829 4769 scope.go:117] "RemoveContainer" containerID="682a8e121d8f06e40ccdf4a6fab0fb4c2740c871706d022858131ee2ec27a66b" Jan 31 17:19:27 crc kubenswrapper[4769]: I0131 17:19:27.616866 4769 scope.go:117] "RemoveContainer" containerID="d4d0768906439206ffe8cc39e0b86534e43578159e313ff70f760a5885b2534a" Jan 31 17:19:27 crc kubenswrapper[4769]: E0131 17:19:27.617461 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 40s restarting failed container=object-updater pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:19:29 crc kubenswrapper[4769]: I0131 17:19:29.159904 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9_fd84e8ff-8554-4ee0-a41a-35f5146d7873/util/0.log" Jan 31 17:19:29 crc kubenswrapper[4769]: I0131 17:19:29.372165 4769 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9_fd84e8ff-8554-4ee0-a41a-35f5146d7873/util/0.log" Jan 31 17:19:29 crc kubenswrapper[4769]: I0131 17:19:29.390639 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9_fd84e8ff-8554-4ee0-a41a-35f5146d7873/pull/0.log" Jan 31 17:19:29 crc kubenswrapper[4769]: I0131 17:19:29.429755 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9_fd84e8ff-8554-4ee0-a41a-35f5146d7873/pull/0.log" Jan 31 17:19:29 crc kubenswrapper[4769]: I0131 17:19:29.573105 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9_fd84e8ff-8554-4ee0-a41a-35f5146d7873/util/0.log" Jan 31 17:19:29 crc kubenswrapper[4769]: I0131 17:19:29.590971 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9_fd84e8ff-8554-4ee0-a41a-35f5146d7873/extract/0.log" Jan 31 17:19:29 crc kubenswrapper[4769]: I0131 17:19:29.596363 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhhn9_fd84e8ff-8554-4ee0-a41a-35f5146d7873/pull/0.log" Jan 31 17:19:29 crc kubenswrapper[4769]: I0131 17:19:29.737715 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-qskmw_2b46f121-f349-493b-9835-6ee6b3bc3ec6/extract-utilities/0.log" Jan 31 17:19:29 crc kubenswrapper[4769]: I0131 17:19:29.876345 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-qskmw_2b46f121-f349-493b-9835-6ee6b3bc3ec6/extract-content/0.log" Jan 31 17:19:29 crc kubenswrapper[4769]: I0131 17:19:29.877910 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-qskmw_2b46f121-f349-493b-9835-6ee6b3bc3ec6/extract-utilities/0.log" Jan 31 17:19:29 crc kubenswrapper[4769]: I0131 17:19:29.891633 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-qskmw_2b46f121-f349-493b-9835-6ee6b3bc3ec6/extract-content/0.log" Jan 31 17:19:30 crc kubenswrapper[4769]: I0131 17:19:30.162875 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-qskmw_2b46f121-f349-493b-9835-6ee6b3bc3ec6/extract-content/0.log" Jan 31 17:19:30 crc kubenswrapper[4769]: I0131 17:19:30.168279 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-qskmw_2b46f121-f349-493b-9835-6ee6b3bc3ec6/extract-utilities/0.log" Jan 31 17:19:30 crc kubenswrapper[4769]: I0131 17:19:30.390541 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-znn46_04bbbf10-31ac-42ee-9ea9-bb96cfa7fb99/extract-utilities/0.log" Jan 31 17:19:30 crc kubenswrapper[4769]: I0131 17:19:30.617202 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-qskmw_2b46f121-f349-493b-9835-6ee6b3bc3ec6/registry-server/0.log" Jan 31 17:19:30 crc kubenswrapper[4769]: I0131 17:19:30.628412 4769 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_community-operators-znn46_04bbbf10-31ac-42ee-9ea9-bb96cfa7fb99/extract-content/0.log" Jan 31 17:19:30 crc kubenswrapper[4769]: I0131 17:19:30.635927 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-znn46_04bbbf10-31ac-42ee-9ea9-bb96cfa7fb99/extract-utilities/0.log" Jan 31 17:19:30 crc kubenswrapper[4769]: I0131 17:19:30.663308 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-znn46_04bbbf10-31ac-42ee-9ea9-bb96cfa7fb99/extract-content/0.log" Jan 31 17:19:30 crc kubenswrapper[4769]: I0131 17:19:30.823223 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-znn46_04bbbf10-31ac-42ee-9ea9-bb96cfa7fb99/extract-utilities/0.log" Jan 31 17:19:30 crc kubenswrapper[4769]: I0131 17:19:30.855625 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-znn46_04bbbf10-31ac-42ee-9ea9-bb96cfa7fb99/extract-content/0.log" Jan 31 17:19:31 crc kubenswrapper[4769]: I0131 17:19:31.075749 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-47rlr_ab91e141-74c5-4c96-87fb-f0f1d41f7456/marketplace-operator/0.log" Jan 31 17:19:31 crc kubenswrapper[4769]: I0131 17:19:31.124137 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-kkrd9_b8c1fd17-896d-4f5d-b8c5-378a95dceb35/extract-utilities/0.log" Jan 31 17:19:31 crc kubenswrapper[4769]: I0131 17:19:31.301447 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-kkrd9_b8c1fd17-896d-4f5d-b8c5-378a95dceb35/extract-utilities/0.log" Jan 31 17:19:31 crc kubenswrapper[4769]: I0131 17:19:31.314741 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-znn46_04bbbf10-31ac-42ee-9ea9-bb96cfa7fb99/registry-server/0.log" Jan 31 17:19:31 crc kubenswrapper[4769]: I0131 17:19:31.315017 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-kkrd9_b8c1fd17-896d-4f5d-b8c5-378a95dceb35/extract-content/0.log" Jan 31 17:19:31 crc kubenswrapper[4769]: I0131 17:19:31.338180 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-kkrd9_b8c1fd17-896d-4f5d-b8c5-378a95dceb35/extract-content/0.log" Jan 31 17:19:31 crc kubenswrapper[4769]: I0131 17:19:31.474725 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-kkrd9_b8c1fd17-896d-4f5d-b8c5-378a95dceb35/extract-utilities/0.log" Jan 31 17:19:31 crc kubenswrapper[4769]: I0131 17:19:31.538109 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-kkrd9_b8c1fd17-896d-4f5d-b8c5-378a95dceb35/extract-content/0.log" Jan 31 17:19:31 crc kubenswrapper[4769]: I0131 17:19:31.586720 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-kkrd9_b8c1fd17-896d-4f5d-b8c5-378a95dceb35/registry-server/0.log" Jan 31 17:19:31 crc kubenswrapper[4769]: I0131 17:19:31.683221 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-scx7g_a488f9b4-4988-4cc1-8085-1c4410a2aa7b/extract-utilities/0.log" Jan 31 17:19:31 crc kubenswrapper[4769]: I0131 17:19:31.806126 4769 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_redhat-operators-scx7g_a488f9b4-4988-4cc1-8085-1c4410a2aa7b/extract-utilities/0.log" Jan 31 17:19:31 crc kubenswrapper[4769]: I0131 17:19:31.838483 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-scx7g_a488f9b4-4988-4cc1-8085-1c4410a2aa7b/extract-content/0.log" Jan 31 17:19:31 crc kubenswrapper[4769]: I0131 17:19:31.850730 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-scx7g_a488f9b4-4988-4cc1-8085-1c4410a2aa7b/extract-content/0.log" Jan 31 17:19:32 crc kubenswrapper[4769]: I0131 17:19:32.017102 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-scx7g_a488f9b4-4988-4cc1-8085-1c4410a2aa7b/extract-content/0.log" Jan 31 17:19:32 crc kubenswrapper[4769]: I0131 17:19:32.030191 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-scx7g_a488f9b4-4988-4cc1-8085-1c4410a2aa7b/extract-utilities/0.log" Jan 31 17:19:32 crc kubenswrapper[4769]: I0131 17:19:32.477243 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-scx7g_a488f9b4-4988-4cc1-8085-1c4410a2aa7b/registry-server/0.log" Jan 31 17:19:32 crc kubenswrapper[4769]: I0131 17:19:32.712521 4769 scope.go:117] "RemoveContainer" containerID="4458fcd40801fe3787795388074fefeaa434873f4716c2298183466301cf956f" Jan 31 17:19:32 crc kubenswrapper[4769]: E0131 17:19:32.713130 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:19:33 crc kubenswrapper[4769]: I0131 17:19:33.708050 4769 scope.go:117] "RemoveContainer" containerID="3c8c682bf057b3f3cd818ea1598fbb8e267b962e0187e1fb9693415421d4042c" Jan 31 17:19:33 crc kubenswrapper[4769]: I0131 17:19:33.708333 4769 scope.go:117] "RemoveContainer" containerID="20329b69b54456b9125eb1ef9a1aeb204bed6414354d722188fd5d824c019f40" Jan 31 17:19:33 crc kubenswrapper[4769]: E0131 17:19:33.708561 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:19:33 crc kubenswrapper[4769]: I0131 17:19:33.709128 4769 scope.go:117] "RemoveContainer" containerID="93b9536f213a9a8ebff163e44fa064fea71fe72fd3236438bcb9d2c1082f94fd" Jan 31 17:19:33 crc kubenswrapper[4769]: I0131 17:19:33.709269 4769 scope.go:117] "RemoveContainer" containerID="4a808d9f2da7be4edf651d6d424731ea4ceaeed364b4417a065b21c22246d30c" Jan 31 17:19:33 crc kubenswrapper[4769]: I0131 17:19:33.709426 4769 scope.go:117] "RemoveContainer" containerID="62b924cb2aedb0674029c40a405b05ea5a55bce2923841575330407035bc1267" Jan 31 17:19:33 crc 
kubenswrapper[4769]: I0131 17:19:33.709524 4769 scope.go:117] "RemoveContainer" containerID="1d248f00feafbc9afe8eb2636aa7b6e4ee44e68fbe01f11d313641cc945e74d3" Jan 31 17:19:33 crc kubenswrapper[4769]: E0131 17:19:33.709950 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 20s restarting failed container=object-updater pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:19:34 crc kubenswrapper[4769]: I0131 17:19:34.844995 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices\") pod \"swift-ring-rebalance-2sjs2\" (UID: \"54c0116b-a027-4f11-8b6b-aa00778f1acb\") " pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 17:19:34 crc kubenswrapper[4769]: E0131 17:19:34.845173 4769 configmap.go:193] Couldn't get configMap swift-kuttl-tests/swift-ring-config-data: configmap "swift-ring-config-data" not found Jan 31 17:19:34 crc kubenswrapper[4769]: E0131 17:19:34.845254 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices podName:54c0116b-a027-4f11-8b6b-aa00778f1acb nodeName:}" failed. No retries permitted until 2026-01-31 17:21:36.845234637 +0000 UTC m=+3144.919403306 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices") pod "swift-ring-rebalance-2sjs2" (UID: "54c0116b-a027-4f11-8b6b-aa00778f1acb") : configmap "swift-ring-config-data" not found Jan 31 17:19:38 crc kubenswrapper[4769]: I0131 17:19:38.707914 4769 scope.go:117] "RemoveContainer" containerID="c6269a4105bb6ea59db61469209797c1723266af84a047a8fd15c8ff6b20fe7c" Jan 31 17:19:38 crc kubenswrapper[4769]: I0131 17:19:38.708402 4769 scope.go:117] "RemoveContainer" containerID="bc16d6c35d321e7f226f326ede73d4f7a8c9f8e22dfc09695519261eeb85d0b3" Jan 31 17:19:38 crc kubenswrapper[4769]: I0131 17:19:38.708564 4769 scope.go:117] "RemoveContainer" containerID="682a8e121d8f06e40ccdf4a6fab0fb4c2740c871706d022858131ee2ec27a66b" Jan 31 17:19:38 crc kubenswrapper[4769]: I0131 17:19:38.708575 4769 scope.go:117] "RemoveContainer" containerID="d4d0768906439206ffe8cc39e0b86534e43578159e313ff70f760a5885b2534a" Jan 31 17:19:38 crc kubenswrapper[4769]: E0131 17:19:38.708943 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 40s restarting failed container=object-updater pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:19:39 crc kubenswrapper[4769]: I0131 17:19:39.708819 4769 scope.go:117] "RemoveContainer" containerID="34043e4c4acd3bb4c77eb869798bcbcf46c6b144268550d7dd4bf295127b4033" Jan 31 17:19:39 crc kubenswrapper[4769]: I0131 17:19:39.708882 4769 scope.go:117] "RemoveContainer" containerID="31c5410734988a34733bf2816d43f274001a7710f20cd41dd446e56fc21fd125" Jan 31 17:19:39 crc kubenswrapper[4769]: I0131 17:19:39.708903 4769 scope.go:117] "RemoveContainer" containerID="85f1f039507b8b306801c6bb4ff81c81e24ef3782c3aa1af52cb2accc8cbf579" Jan 31 17:19:39 crc kubenswrapper[4769]: I0131 17:19:39.708946 4769 scope.go:117] "RemoveContainer" containerID="c62e2433e6dd447fa3a7ea820441d82ce4079d9f21bdd74fac77729e6d1a9e47" Jan 31 17:19:39 crc kubenswrapper[4769]: I0131 17:19:39.708953 4769 scope.go:117] "RemoveContainer" containerID="acd95b5565bae3340b8e173514ccf400cc7303c10cd705067a889467cb1bf9a2" Jan 31 17:19:39 crc kubenswrapper[4769]: I0131 17:19:39.709006 4769 scope.go:117] "RemoveContainer" containerID="e371ba0abd96fed529279e8c987705bc2490becc412646d62670c62aab9f16e2" Jan 31 17:19:39 crc kubenswrapper[4769]: E0131 17:19:39.709369 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for 
\"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:19:43 crc kubenswrapper[4769]: I0131 17:19:43.708538 4769 scope.go:117] "RemoveContainer" containerID="4458fcd40801fe3787795388074fefeaa434873f4716c2298183466301cf956f" Jan 31 17:19:43 crc kubenswrapper[4769]: E0131 17:19:43.709256 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:19:46 crc kubenswrapper[4769]: I0131 17:19:46.707928 4769 scope.go:117] "RemoveContainer" containerID="3c8c682bf057b3f3cd818ea1598fbb8e267b962e0187e1fb9693415421d4042c" Jan 31 17:19:46 crc kubenswrapper[4769]: I0131 17:19:46.708271 4769 scope.go:117] "RemoveContainer" containerID="20329b69b54456b9125eb1ef9a1aeb204bed6414354d722188fd5d824c019f40" Jan 31 17:19:46 crc kubenswrapper[4769]: I0131 17:19:46.708923 4769 scope.go:117] "RemoveContainer" containerID="93b9536f213a9a8ebff163e44fa064fea71fe72fd3236438bcb9d2c1082f94fd" Jan 31 17:19:46 crc kubenswrapper[4769]: I0131 17:19:46.709065 4769 scope.go:117] "RemoveContainer" containerID="4a808d9f2da7be4edf651d6d424731ea4ceaeed364b4417a065b21c22246d30c" Jan 31 17:19:46 crc kubenswrapper[4769]: I0131 17:19:46.709254 4769 scope.go:117] "RemoveContainer" containerID="62b924cb2aedb0674029c40a405b05ea5a55bce2923841575330407035bc1267" Jan 31 17:19:46 crc kubenswrapper[4769]: I0131 17:19:46.709271 4769 scope.go:117] "RemoveContainer" containerID="1d248f00feafbc9afe8eb2636aa7b6e4ee44e68fbe01f11d313641cc945e74d3" Jan 31 17:19:46 crc kubenswrapper[4769]: E0131 17:19:46.709770 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" 
podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:19:46 crc kubenswrapper[4769]: E0131 17:19:46.875424 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:19:47 crc kubenswrapper[4769]: I0131 17:19:47.775019 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerStarted","Data":"bfa98bf34cbf02bf96128bc45d5381ea8373c95f3e5302a55fe40ce8c1cadeb4"} Jan 31 17:19:47 crc kubenswrapper[4769]: I0131 17:19:47.775863 4769 scope.go:117] "RemoveContainer" containerID="93b9536f213a9a8ebff163e44fa064fea71fe72fd3236438bcb9d2c1082f94fd" Jan 31 17:19:47 crc kubenswrapper[4769]: I0131 17:19:47.775920 4769 scope.go:117] "RemoveContainer" containerID="4a808d9f2da7be4edf651d6d424731ea4ceaeed364b4417a065b21c22246d30c" Jan 31 17:19:47 crc kubenswrapper[4769]: I0131 17:19:47.776007 4769 scope.go:117] "RemoveContainer" containerID="1d248f00feafbc9afe8eb2636aa7b6e4ee44e68fbe01f11d313641cc945e74d3" Jan 31 17:19:47 crc kubenswrapper[4769]: E0131 17:19:47.776253 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:19:48 crc kubenswrapper[4769]: E0131 17:19:48.640282 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ring-data-devices], unattached volumes=[], failed to process volumes=[]: context deadline exceeded" pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" podUID="54c0116b-a027-4f11-8b6b-aa00778f1acb" Jan 31 17:19:48 crc kubenswrapper[4769]: I0131 17:19:48.782808 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 17:19:50 crc kubenswrapper[4769]: I0131 17:19:50.709351 4769 scope.go:117] "RemoveContainer" containerID="c6269a4105bb6ea59db61469209797c1723266af84a047a8fd15c8ff6b20fe7c" Jan 31 17:19:50 crc kubenswrapper[4769]: I0131 17:19:50.709927 4769 scope.go:117] "RemoveContainer" containerID="bc16d6c35d321e7f226f326ede73d4f7a8c9f8e22dfc09695519261eeb85d0b3" Jan 31 17:19:50 crc kubenswrapper[4769]: I0131 17:19:50.710094 4769 scope.go:117] "RemoveContainer" containerID="682a8e121d8f06e40ccdf4a6fab0fb4c2740c871706d022858131ee2ec27a66b" Jan 31 17:19:50 crc kubenswrapper[4769]: I0131 17:19:50.710109 4769 scope.go:117] "RemoveContainer" containerID="d4d0768906439206ffe8cc39e0b86534e43578159e313ff70f760a5885b2534a" Jan 31 17:19:50 crc kubenswrapper[4769]: E0131 17:19:50.710762 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 40s restarting failed container=object-updater pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:19:53 crc kubenswrapper[4769]: I0131 17:19:53.708598 4769 scope.go:117] "RemoveContainer" containerID="34043e4c4acd3bb4c77eb869798bcbcf46c6b144268550d7dd4bf295127b4033" Jan 31 17:19:53 crc kubenswrapper[4769]: I0131 17:19:53.709078 4769 scope.go:117] "RemoveContainer" containerID="31c5410734988a34733bf2816d43f274001a7710f20cd41dd446e56fc21fd125" Jan 31 17:19:53 crc kubenswrapper[4769]: I0131 17:19:53.709129 4769 scope.go:117] "RemoveContainer" containerID="85f1f039507b8b306801c6bb4ff81c81e24ef3782c3aa1af52cb2accc8cbf579" Jan 31 17:19:53 crc kubenswrapper[4769]: I0131 17:19:53.709243 4769 scope.go:117] "RemoveContainer" containerID="c62e2433e6dd447fa3a7ea820441d82ce4079d9f21bdd74fac77729e6d1a9e47" Jan 31 17:19:53 crc kubenswrapper[4769]: I0131 17:19:53.709260 4769 scope.go:117] "RemoveContainer" containerID="acd95b5565bae3340b8e173514ccf400cc7303c10cd705067a889467cb1bf9a2" Jan 31 17:19:53 crc kubenswrapper[4769]: I0131 17:19:53.709343 4769 scope.go:117] "RemoveContainer" containerID="e371ba0abd96fed529279e8c987705bc2490becc412646d62670c62aab9f16e2" Jan 31 17:19:53 crc kubenswrapper[4769]: E0131 17:19:53.709969 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for 
\"container-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:19:56 crc kubenswrapper[4769]: I0131 17:19:56.708733 4769 scope.go:117] "RemoveContainer" containerID="4458fcd40801fe3787795388074fefeaa434873f4716c2298183466301cf956f" Jan 31 17:19:56 crc kubenswrapper[4769]: E0131 17:19:56.711610 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:19:58 crc kubenswrapper[4769]: I0131 17:19:58.708940 4769 scope.go:117] "RemoveContainer" containerID="3c8c682bf057b3f3cd818ea1598fbb8e267b962e0187e1fb9693415421d4042c" Jan 31 17:19:58 crc kubenswrapper[4769]: I0131 17:19:58.709175 4769 scope.go:117] "RemoveContainer" containerID="20329b69b54456b9125eb1ef9a1aeb204bed6414354d722188fd5d824c019f40" Jan 31 17:19:58 crc kubenswrapper[4769]: E0131 17:19:58.709418 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:19:59 crc kubenswrapper[4769]: I0131 17:19:59.712453 4769 scope.go:117] "RemoveContainer" containerID="93b9536f213a9a8ebff163e44fa064fea71fe72fd3236438bcb9d2c1082f94fd" Jan 31 17:19:59 crc kubenswrapper[4769]: I0131 17:19:59.712624 4769 scope.go:117] "RemoveContainer" containerID="4a808d9f2da7be4edf651d6d424731ea4ceaeed364b4417a065b21c22246d30c" Jan 31 17:19:59 crc kubenswrapper[4769]: I0131 17:19:59.712799 4769 scope.go:117] "RemoveContainer" containerID="1d248f00feafbc9afe8eb2636aa7b6e4ee44e68fbe01f11d313641cc945e74d3" Jan 31 17:19:59 crc kubenswrapper[4769]: E0131 17:19:59.713253 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for 
\"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:20:04 crc kubenswrapper[4769]: I0131 17:20:04.708069 4769 scope.go:117] "RemoveContainer" containerID="c6269a4105bb6ea59db61469209797c1723266af84a047a8fd15c8ff6b20fe7c" Jan 31 17:20:04 crc kubenswrapper[4769]: I0131 17:20:04.708403 4769 scope.go:117] "RemoveContainer" containerID="bc16d6c35d321e7f226f326ede73d4f7a8c9f8e22dfc09695519261eeb85d0b3" Jan 31 17:20:04 crc kubenswrapper[4769]: I0131 17:20:04.708474 4769 scope.go:117] "RemoveContainer" containerID="682a8e121d8f06e40ccdf4a6fab0fb4c2740c871706d022858131ee2ec27a66b" Jan 31 17:20:04 crc kubenswrapper[4769]: I0131 17:20:04.708481 4769 scope.go:117] "RemoveContainer" containerID="d4d0768906439206ffe8cc39e0b86534e43578159e313ff70f760a5885b2534a" Jan 31 17:20:04 crc kubenswrapper[4769]: E0131 17:20:04.708757 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 40s restarting failed container=object-updater pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:20:05 crc kubenswrapper[4769]: I0131 17:20:05.707955 4769 scope.go:117] "RemoveContainer" containerID="34043e4c4acd3bb4c77eb869798bcbcf46c6b144268550d7dd4bf295127b4033" Jan 31 17:20:05 crc kubenswrapper[4769]: I0131 17:20:05.708276 4769 scope.go:117] "RemoveContainer" containerID="31c5410734988a34733bf2816d43f274001a7710f20cd41dd446e56fc21fd125" Jan 31 17:20:05 crc kubenswrapper[4769]: I0131 17:20:05.708298 4769 scope.go:117] "RemoveContainer" containerID="85f1f039507b8b306801c6bb4ff81c81e24ef3782c3aa1af52cb2accc8cbf579" Jan 31 17:20:05 crc kubenswrapper[4769]: I0131 17:20:05.708343 4769 scope.go:117] "RemoveContainer" containerID="c62e2433e6dd447fa3a7ea820441d82ce4079d9f21bdd74fac77729e6d1a9e47" Jan 31 17:20:05 crc kubenswrapper[4769]: I0131 17:20:05.708348 4769 scope.go:117] "RemoveContainer" containerID="acd95b5565bae3340b8e173514ccf400cc7303c10cd705067a889467cb1bf9a2" Jan 31 17:20:05 crc kubenswrapper[4769]: I0131 17:20:05.708378 4769 scope.go:117] "RemoveContainer" containerID="e371ba0abd96fed529279e8c987705bc2490becc412646d62670c62aab9f16e2" Jan 31 17:20:05 crc kubenswrapper[4769]: E0131 17:20:05.708672 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=container-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:20:10 crc kubenswrapper[4769]: I0131 17:20:10.708736 4769 scope.go:117] "RemoveContainer" containerID="4458fcd40801fe3787795388074fefeaa434873f4716c2298183466301cf956f" Jan 31 17:20:10 crc kubenswrapper[4769]: I0131 17:20:10.709224 4769 scope.go:117] "RemoveContainer" containerID="93b9536f213a9a8ebff163e44fa064fea71fe72fd3236438bcb9d2c1082f94fd" Jan 31 17:20:10 crc kubenswrapper[4769]: I0131 17:20:10.709319 4769 scope.go:117] "RemoveContainer" containerID="4a808d9f2da7be4edf651d6d424731ea4ceaeed364b4417a065b21c22246d30c" Jan 31 17:20:10 crc kubenswrapper[4769]: E0131 17:20:10.709389 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:20:10 crc kubenswrapper[4769]: I0131 17:20:10.709456 4769 scope.go:117] "RemoveContainer" containerID="1d248f00feafbc9afe8eb2636aa7b6e4ee44e68fbe01f11d313641cc945e74d3" Jan 31 17:20:10 crc kubenswrapper[4769]: E0131 17:20:10.709870 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:20:11 crc kubenswrapper[4769]: I0131 17:20:11.711689 4769 scope.go:117] "RemoveContainer" 
containerID="3c8c682bf057b3f3cd818ea1598fbb8e267b962e0187e1fb9693415421d4042c" Jan 31 17:20:11 crc kubenswrapper[4769]: I0131 17:20:11.711736 4769 scope.go:117] "RemoveContainer" containerID="20329b69b54456b9125eb1ef9a1aeb204bed6414354d722188fd5d824c019f40" Jan 31 17:20:11 crc kubenswrapper[4769]: E0131 17:20:11.712110 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:20:17 crc kubenswrapper[4769]: I0131 17:20:17.710658 4769 scope.go:117] "RemoveContainer" containerID="34043e4c4acd3bb4c77eb869798bcbcf46c6b144268550d7dd4bf295127b4033" Jan 31 17:20:17 crc kubenswrapper[4769]: I0131 17:20:17.711491 4769 scope.go:117] "RemoveContainer" containerID="31c5410734988a34733bf2816d43f274001a7710f20cd41dd446e56fc21fd125" Jan 31 17:20:17 crc kubenswrapper[4769]: I0131 17:20:17.711618 4769 scope.go:117] "RemoveContainer" containerID="85f1f039507b8b306801c6bb4ff81c81e24ef3782c3aa1af52cb2accc8cbf579" Jan 31 17:20:17 crc kubenswrapper[4769]: I0131 17:20:17.711740 4769 scope.go:117] "RemoveContainer" containerID="c62e2433e6dd447fa3a7ea820441d82ce4079d9f21bdd74fac77729e6d1a9e47" Jan 31 17:20:17 crc kubenswrapper[4769]: I0131 17:20:17.711758 4769 scope.go:117] "RemoveContainer" containerID="acd95b5565bae3340b8e173514ccf400cc7303c10cd705067a889467cb1bf9a2" Jan 31 17:20:17 crc kubenswrapper[4769]: I0131 17:20:17.711865 4769 scope.go:117] "RemoveContainer" containerID="e371ba0abd96fed529279e8c987705bc2490becc412646d62670c62aab9f16e2" Jan 31 17:20:17 crc kubenswrapper[4769]: E0131 17:20:17.965290 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:20:18 crc kubenswrapper[4769]: I0131 17:20:18.039361 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" 
event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerStarted","Data":"0305291a9f97d723baf7ea6865f351f37d4c15692a2e5e718215bdfd3ee0b7e5"} Jan 31 17:20:18 crc kubenswrapper[4769]: I0131 17:20:18.040165 4769 scope.go:117] "RemoveContainer" containerID="34043e4c4acd3bb4c77eb869798bcbcf46c6b144268550d7dd4bf295127b4033" Jan 31 17:20:18 crc kubenswrapper[4769]: I0131 17:20:18.040248 4769 scope.go:117] "RemoveContainer" containerID="31c5410734988a34733bf2816d43f274001a7710f20cd41dd446e56fc21fd125" Jan 31 17:20:18 crc kubenswrapper[4769]: I0131 17:20:18.040352 4769 scope.go:117] "RemoveContainer" containerID="c62e2433e6dd447fa3a7ea820441d82ce4079d9f21bdd74fac77729e6d1a9e47" Jan 31 17:20:18 crc kubenswrapper[4769]: I0131 17:20:18.040367 4769 scope.go:117] "RemoveContainer" containerID="acd95b5565bae3340b8e173514ccf400cc7303c10cd705067a889467cb1bf9a2" Jan 31 17:20:18 crc kubenswrapper[4769]: I0131 17:20:18.040408 4769 scope.go:117] "RemoveContainer" containerID="e371ba0abd96fed529279e8c987705bc2490becc412646d62670c62aab9f16e2" Jan 31 17:20:18 crc kubenswrapper[4769]: E0131 17:20:18.040803 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:20:18 crc kubenswrapper[4769]: I0131 17:20:18.707913 4769 scope.go:117] "RemoveContainer" containerID="c6269a4105bb6ea59db61469209797c1723266af84a047a8fd15c8ff6b20fe7c" Jan 31 17:20:18 crc kubenswrapper[4769]: I0131 17:20:18.708336 4769 scope.go:117] "RemoveContainer" containerID="bc16d6c35d321e7f226f326ede73d4f7a8c9f8e22dfc09695519261eeb85d0b3" Jan 31 17:20:18 crc kubenswrapper[4769]: I0131 17:20:18.708433 4769 scope.go:117] "RemoveContainer" containerID="682a8e121d8f06e40ccdf4a6fab0fb4c2740c871706d022858131ee2ec27a66b" Jan 31 17:20:18 crc kubenswrapper[4769]: I0131 17:20:18.708443 4769 scope.go:117] "RemoveContainer" containerID="d4d0768906439206ffe8cc39e0b86534e43578159e313ff70f760a5885b2534a" Jan 31 17:20:18 crc kubenswrapper[4769]: E0131 17:20:18.975624 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:20:19 crc kubenswrapper[4769]: I0131 17:20:19.059738 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerStarted","Data":"6bdae79c2afb0cde98e22d0e63724fec5c6aa813ba882105dde519803f980fe9"} Jan 31 17:20:19 crc kubenswrapper[4769]: I0131 17:20:19.060432 4769 scope.go:117] "RemoveContainer" containerID="c6269a4105bb6ea59db61469209797c1723266af84a047a8fd15c8ff6b20fe7c" Jan 31 17:20:19 crc kubenswrapper[4769]: I0131 17:20:19.060526 4769 scope.go:117] "RemoveContainer" containerID="bc16d6c35d321e7f226f326ede73d4f7a8c9f8e22dfc09695519261eeb85d0b3" Jan 31 17:20:19 crc kubenswrapper[4769]: I0131 17:20:19.060646 4769 scope.go:117] "RemoveContainer" containerID="d4d0768906439206ffe8cc39e0b86534e43578159e313ff70f760a5885b2534a" Jan 31 17:20:19 crc kubenswrapper[4769]: E0131 17:20:19.060961 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:20:22 crc kubenswrapper[4769]: I0131 17:20:22.717765 4769 scope.go:117] "RemoveContainer" containerID="93b9536f213a9a8ebff163e44fa064fea71fe72fd3236438bcb9d2c1082f94fd" Jan 31 17:20:22 crc kubenswrapper[4769]: I0131 17:20:22.718176 4769 scope.go:117] "RemoveContainer" containerID="4a808d9f2da7be4edf651d6d424731ea4ceaeed364b4417a065b21c22246d30c" Jan 31 17:20:22 crc kubenswrapper[4769]: I0131 17:20:22.718354 4769 scope.go:117] "RemoveContainer" containerID="1d248f00feafbc9afe8eb2636aa7b6e4ee44e68fbe01f11d313641cc945e74d3" Jan 31 17:20:22 crc kubenswrapper[4769]: E0131 17:20:22.718960 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:20:24 
crc kubenswrapper[4769]: I0131 17:20:24.710462 4769 scope.go:117] "RemoveContainer" containerID="4458fcd40801fe3787795388074fefeaa434873f4716c2298183466301cf956f" Jan 31 17:20:24 crc kubenswrapper[4769]: E0131 17:20:24.711375 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:20:25 crc kubenswrapper[4769]: I0131 17:20:25.708847 4769 scope.go:117] "RemoveContainer" containerID="3c8c682bf057b3f3cd818ea1598fbb8e267b962e0187e1fb9693415421d4042c" Jan 31 17:20:25 crc kubenswrapper[4769]: I0131 17:20:25.708894 4769 scope.go:117] "RemoveContainer" containerID="20329b69b54456b9125eb1ef9a1aeb204bed6414354d722188fd5d824c019f40" Jan 31 17:20:25 crc kubenswrapper[4769]: E0131 17:20:25.709404 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:20:31 crc kubenswrapper[4769]: I0131 17:20:31.709570 4769 scope.go:117] "RemoveContainer" containerID="34043e4c4acd3bb4c77eb869798bcbcf46c6b144268550d7dd4bf295127b4033" Jan 31 17:20:31 crc kubenswrapper[4769]: I0131 17:20:31.710044 4769 scope.go:117] "RemoveContainer" containerID="31c5410734988a34733bf2816d43f274001a7710f20cd41dd446e56fc21fd125" Jan 31 17:20:31 crc kubenswrapper[4769]: I0131 17:20:31.710230 4769 scope.go:117] "RemoveContainer" containerID="c62e2433e6dd447fa3a7ea820441d82ce4079d9f21bdd74fac77729e6d1a9e47" Jan 31 17:20:31 crc kubenswrapper[4769]: I0131 17:20:31.710249 4769 scope.go:117] "RemoveContainer" containerID="acd95b5565bae3340b8e173514ccf400cc7303c10cd705067a889467cb1bf9a2" Jan 31 17:20:31 crc kubenswrapper[4769]: I0131 17:20:31.710323 4769 scope.go:117] "RemoveContainer" containerID="e371ba0abd96fed529279e8c987705bc2490becc412646d62670c62aab9f16e2" Jan 31 17:20:31 crc kubenswrapper[4769]: E0131 17:20:31.711015 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer 
pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:20:33 crc kubenswrapper[4769]: I0131 17:20:33.710432 4769 scope.go:117] "RemoveContainer" containerID="c6269a4105bb6ea59db61469209797c1723266af84a047a8fd15c8ff6b20fe7c" Jan 31 17:20:33 crc kubenswrapper[4769]: I0131 17:20:33.710639 4769 scope.go:117] "RemoveContainer" containerID="bc16d6c35d321e7f226f326ede73d4f7a8c9f8e22dfc09695519261eeb85d0b3" Jan 31 17:20:33 crc kubenswrapper[4769]: I0131 17:20:33.710839 4769 scope.go:117] "RemoveContainer" containerID="d4d0768906439206ffe8cc39e0b86534e43578159e313ff70f760a5885b2534a" Jan 31 17:20:33 crc kubenswrapper[4769]: E0131 17:20:33.711349 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:20:36 crc kubenswrapper[4769]: I0131 17:20:36.709122 4769 scope.go:117] "RemoveContainer" containerID="93b9536f213a9a8ebff163e44fa064fea71fe72fd3236438bcb9d2c1082f94fd" Jan 31 17:20:36 crc kubenswrapper[4769]: I0131 17:20:36.709824 4769 scope.go:117] "RemoveContainer" containerID="4a808d9f2da7be4edf651d6d424731ea4ceaeed364b4417a065b21c22246d30c" Jan 31 17:20:36 crc kubenswrapper[4769]: I0131 17:20:36.710003 4769 scope.go:117] "RemoveContainer" containerID="1d248f00feafbc9afe8eb2636aa7b6e4ee44e68fbe01f11d313641cc945e74d3" Jan 31 17:20:36 crc kubenswrapper[4769]: E0131 17:20:36.710461 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:20:39 crc kubenswrapper[4769]: I0131 17:20:39.709063 4769 scope.go:117] "RemoveContainer" containerID="4458fcd40801fe3787795388074fefeaa434873f4716c2298183466301cf956f" Jan 31 17:20:39 crc kubenswrapper[4769]: E0131 17:20:39.711003 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:20:40 crc kubenswrapper[4769]: I0131 17:20:40.708714 4769 scope.go:117] "RemoveContainer" containerID="3c8c682bf057b3f3cd818ea1598fbb8e267b962e0187e1fb9693415421d4042c" Jan 31 17:20:40 crc kubenswrapper[4769]: I0131 17:20:40.708759 4769 scope.go:117] "RemoveContainer" containerID="20329b69b54456b9125eb1ef9a1aeb204bed6414354d722188fd5d824c019f40" Jan 31 17:20:40 crc kubenswrapper[4769]: E0131 17:20:40.709445 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:20:42 crc kubenswrapper[4769]: I0131 17:20:42.293248 4769 generic.go:334] "Generic (PLEG): container finished" podID="29cb450c-f082-4909-977f-840f5f050086" containerID="245c83ffdfca6976b09c03d8f2f74c1d869a30f50e8647bf9ada60e27c97a9c5" exitCode=0 Jan 31 17:20:42 crc kubenswrapper[4769]: I0131 17:20:42.293544 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-fk6lp/must-gather-9kcd6" event={"ID":"29cb450c-f082-4909-977f-840f5f050086","Type":"ContainerDied","Data":"245c83ffdfca6976b09c03d8f2f74c1d869a30f50e8647bf9ada60e27c97a9c5"} Jan 31 17:20:42 crc kubenswrapper[4769]: I0131 17:20:42.296647 4769 scope.go:117] "RemoveContainer" containerID="245c83ffdfca6976b09c03d8f2f74c1d869a30f50e8647bf9ada60e27c97a9c5" Jan 31 17:20:43 crc kubenswrapper[4769]: I0131 17:20:43.267518 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-fk6lp_must-gather-9kcd6_29cb450c-f082-4909-977f-840f5f050086/gather/0.log" Jan 31 17:20:46 crc kubenswrapper[4769]: I0131 17:20:46.709335 4769 scope.go:117] "RemoveContainer" containerID="c6269a4105bb6ea59db61469209797c1723266af84a047a8fd15c8ff6b20fe7c" Jan 31 17:20:46 crc kubenswrapper[4769]: I0131 17:20:46.709973 4769 scope.go:117] "RemoveContainer" containerID="bc16d6c35d321e7f226f326ede73d4f7a8c9f8e22dfc09695519261eeb85d0b3" Jan 31 17:20:46 crc kubenswrapper[4769]: I0131 17:20:46.710152 4769 scope.go:117] "RemoveContainer" containerID="d4d0768906439206ffe8cc39e0b86534e43578159e313ff70f760a5885b2534a" Jan 31 17:20:46 crc kubenswrapper[4769]: I0131 17:20:46.710251 4769 scope.go:117] "RemoveContainer" containerID="34043e4c4acd3bb4c77eb869798bcbcf46c6b144268550d7dd4bf295127b4033" Jan 31 17:20:46 crc kubenswrapper[4769]: I0131 17:20:46.710413 4769 scope.go:117] "RemoveContainer" containerID="31c5410734988a34733bf2816d43f274001a7710f20cd41dd446e56fc21fd125" Jan 31 17:20:46 crc kubenswrapper[4769]: I0131 17:20:46.710607 4769 scope.go:117] "RemoveContainer" containerID="c62e2433e6dd447fa3a7ea820441d82ce4079d9f21bdd74fac77729e6d1a9e47" Jan 31 17:20:46 crc kubenswrapper[4769]: I0131 17:20:46.710623 4769 scope.go:117] "RemoveContainer" 
containerID="acd95b5565bae3340b8e173514ccf400cc7303c10cd705067a889467cb1bf9a2" Jan 31 17:20:46 crc kubenswrapper[4769]: I0131 17:20:46.710734 4769 scope.go:117] "RemoveContainer" containerID="e371ba0abd96fed529279e8c987705bc2490becc412646d62670c62aab9f16e2" Jan 31 17:20:46 crc kubenswrapper[4769]: E0131 17:20:46.710781 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:20:46 crc kubenswrapper[4769]: E0131 17:20:46.711358 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:20:48 crc kubenswrapper[4769]: I0131 17:20:48.363635 4769 generic.go:334] "Generic (PLEG): container finished" podID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" containerID="6edbc4cca89431b559a2d473c7779b547c8c943dbb854d38e4273a9fd27c649f" exitCode=1 Jan 31 17:20:48 crc kubenswrapper[4769]: I0131 17:20:48.363713 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerDied","Data":"6edbc4cca89431b559a2d473c7779b547c8c943dbb854d38e4273a9fd27c649f"} Jan 31 17:20:48 crc kubenswrapper[4769]: I0131 17:20:48.364232 4769 scope.go:117] "RemoveContainer" containerID="1b75bb5aaeeefb7a7abaa985fce7c388ff1391793e0add450800b7d3c4c5b861" Jan 31 17:20:48 crc kubenswrapper[4769]: I0131 17:20:48.364887 4769 scope.go:117] "RemoveContainer" containerID="93b9536f213a9a8ebff163e44fa064fea71fe72fd3236438bcb9d2c1082f94fd" Jan 31 17:20:48 crc kubenswrapper[4769]: I0131 17:20:48.364939 4769 scope.go:117] "RemoveContainer" containerID="4a808d9f2da7be4edf651d6d424731ea4ceaeed364b4417a065b21c22246d30c" Jan 31 17:20:48 crc 
kubenswrapper[4769]: I0131 17:20:48.364986 4769 scope.go:117] "RemoveContainer" containerID="6edbc4cca89431b559a2d473c7779b547c8c943dbb854d38e4273a9fd27c649f" Jan 31 17:20:48 crc kubenswrapper[4769]: I0131 17:20:48.365063 4769 scope.go:117] "RemoveContainer" containerID="1d248f00feafbc9afe8eb2636aa7b6e4ee44e68fbe01f11d313641cc945e74d3" Jan 31 17:20:48 crc kubenswrapper[4769]: E0131 17:20:48.365330 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 40s restarting failed container=container-updater pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:20:50 crc kubenswrapper[4769]: I0131 17:20:50.657557 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-fk6lp/must-gather-9kcd6"] Jan 31 17:20:50 crc kubenswrapper[4769]: I0131 17:20:50.658211 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-fk6lp/must-gather-9kcd6" podUID="29cb450c-f082-4909-977f-840f5f050086" containerName="copy" containerID="cri-o://20b43e9e319f73a74f105a3b5b855dd70ebe8c8cbb3577e9bba9f0217833725a" gracePeriod=2 Jan 31 17:20:50 crc kubenswrapper[4769]: I0131 17:20:50.664439 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-fk6lp/must-gather-9kcd6"] Jan 31 17:20:50 crc kubenswrapper[4769]: I0131 17:20:50.992599 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-fk6lp_must-gather-9kcd6_29cb450c-f082-4909-977f-840f5f050086/copy/0.log" Jan 31 17:20:50 crc kubenswrapper[4769]: I0131 17:20:50.993205 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-fk6lp/must-gather-9kcd6" Jan 31 17:20:51 crc kubenswrapper[4769]: I0131 17:20:51.042116 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/29cb450c-f082-4909-977f-840f5f050086-must-gather-output\") pod \"29cb450c-f082-4909-977f-840f5f050086\" (UID: \"29cb450c-f082-4909-977f-840f5f050086\") " Jan 31 17:20:51 crc kubenswrapper[4769]: I0131 17:20:51.042208 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dqgjs\" (UniqueName: \"kubernetes.io/projected/29cb450c-f082-4909-977f-840f5f050086-kube-api-access-dqgjs\") pod \"29cb450c-f082-4909-977f-840f5f050086\" (UID: \"29cb450c-f082-4909-977f-840f5f050086\") " Jan 31 17:20:51 crc kubenswrapper[4769]: I0131 17:20:51.048076 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29cb450c-f082-4909-977f-840f5f050086-kube-api-access-dqgjs" (OuterVolumeSpecName: "kube-api-access-dqgjs") pod "29cb450c-f082-4909-977f-840f5f050086" (UID: "29cb450c-f082-4909-977f-840f5f050086"). InnerVolumeSpecName "kube-api-access-dqgjs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 31 17:20:51 crc kubenswrapper[4769]: I0131 17:20:51.125834 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/29cb450c-f082-4909-977f-840f5f050086-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "29cb450c-f082-4909-977f-840f5f050086" (UID: "29cb450c-f082-4909-977f-840f5f050086"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 31 17:20:51 crc kubenswrapper[4769]: I0131 17:20:51.144861 4769 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/29cb450c-f082-4909-977f-840f5f050086-must-gather-output\") on node \"crc\" DevicePath \"\"" Jan 31 17:20:51 crc kubenswrapper[4769]: I0131 17:20:51.144921 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dqgjs\" (UniqueName: \"kubernetes.io/projected/29cb450c-f082-4909-977f-840f5f050086-kube-api-access-dqgjs\") on node \"crc\" DevicePath \"\"" Jan 31 17:20:51 crc kubenswrapper[4769]: I0131 17:20:51.404822 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-fk6lp_must-gather-9kcd6_29cb450c-f082-4909-977f-840f5f050086/copy/0.log" Jan 31 17:20:51 crc kubenswrapper[4769]: I0131 17:20:51.405428 4769 generic.go:334] "Generic (PLEG): container finished" podID="29cb450c-f082-4909-977f-840f5f050086" containerID="20b43e9e319f73a74f105a3b5b855dd70ebe8c8cbb3577e9bba9f0217833725a" exitCode=143 Jan 31 17:20:51 crc kubenswrapper[4769]: I0131 17:20:51.405528 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-fk6lp/must-gather-9kcd6" Jan 31 17:20:51 crc kubenswrapper[4769]: I0131 17:20:51.405585 4769 scope.go:117] "RemoveContainer" containerID="20b43e9e319f73a74f105a3b5b855dd70ebe8c8cbb3577e9bba9f0217833725a" Jan 31 17:20:51 crc kubenswrapper[4769]: I0131 17:20:51.423912 4769 scope.go:117] "RemoveContainer" containerID="245c83ffdfca6976b09c03d8f2f74c1d869a30f50e8647bf9ada60e27c97a9c5" Jan 31 17:20:51 crc kubenswrapper[4769]: I0131 17:20:51.481930 4769 scope.go:117] "RemoveContainer" containerID="20b43e9e319f73a74f105a3b5b855dd70ebe8c8cbb3577e9bba9f0217833725a" Jan 31 17:20:51 crc kubenswrapper[4769]: E0131 17:20:51.483181 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"20b43e9e319f73a74f105a3b5b855dd70ebe8c8cbb3577e9bba9f0217833725a\": container with ID starting with 20b43e9e319f73a74f105a3b5b855dd70ebe8c8cbb3577e9bba9f0217833725a not found: ID does not exist" containerID="20b43e9e319f73a74f105a3b5b855dd70ebe8c8cbb3577e9bba9f0217833725a" Jan 31 17:20:51 crc kubenswrapper[4769]: I0131 17:20:51.483222 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"20b43e9e319f73a74f105a3b5b855dd70ebe8c8cbb3577e9bba9f0217833725a"} err="failed to get container status \"20b43e9e319f73a74f105a3b5b855dd70ebe8c8cbb3577e9bba9f0217833725a\": rpc error: code = NotFound desc = could not find container \"20b43e9e319f73a74f105a3b5b855dd70ebe8c8cbb3577e9bba9f0217833725a\": container with ID starting with 20b43e9e319f73a74f105a3b5b855dd70ebe8c8cbb3577e9bba9f0217833725a not found: ID does not exist" Jan 31 17:20:51 crc kubenswrapper[4769]: I0131 17:20:51.483249 4769 scope.go:117] "RemoveContainer" containerID="245c83ffdfca6976b09c03d8f2f74c1d869a30f50e8647bf9ada60e27c97a9c5" Jan 31 17:20:51 crc kubenswrapper[4769]: E0131 17:20:51.484917 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"245c83ffdfca6976b09c03d8f2f74c1d869a30f50e8647bf9ada60e27c97a9c5\": container with ID starting with 245c83ffdfca6976b09c03d8f2f74c1d869a30f50e8647bf9ada60e27c97a9c5 not found: ID does not exist" containerID="245c83ffdfca6976b09c03d8f2f74c1d869a30f50e8647bf9ada60e27c97a9c5" Jan 31 17:20:51 crc kubenswrapper[4769]: I0131 17:20:51.484994 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"245c83ffdfca6976b09c03d8f2f74c1d869a30f50e8647bf9ada60e27c97a9c5"} err="failed to get container status \"245c83ffdfca6976b09c03d8f2f74c1d869a30f50e8647bf9ada60e27c97a9c5\": rpc error: code = NotFound desc = could not find container \"245c83ffdfca6976b09c03d8f2f74c1d869a30f50e8647bf9ada60e27c97a9c5\": container with ID starting with 245c83ffdfca6976b09c03d8f2f74c1d869a30f50e8647bf9ada60e27c97a9c5 not found: ID does not exist" Jan 31 17:20:52 crc kubenswrapper[4769]: I0131 17:20:52.732080 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="29cb450c-f082-4909-977f-840f5f050086" path="/var/lib/kubelet/pods/29cb450c-f082-4909-977f-840f5f050086/volumes" Jan 31 17:20:54 crc kubenswrapper[4769]: I0131 17:20:54.710021 4769 scope.go:117] "RemoveContainer" containerID="4458fcd40801fe3787795388074fefeaa434873f4716c2298183466301cf956f" Jan 31 17:20:54 crc kubenswrapper[4769]: E0131 17:20:54.711140 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 
5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:20:54 crc kubenswrapper[4769]: I0131 17:20:54.711428 4769 scope.go:117] "RemoveContainer" containerID="3c8c682bf057b3f3cd818ea1598fbb8e267b962e0187e1fb9693415421d4042c" Jan 31 17:20:54 crc kubenswrapper[4769]: I0131 17:20:54.711470 4769 scope.go:117] "RemoveContainer" containerID="20329b69b54456b9125eb1ef9a1aeb204bed6414354d722188fd5d824c019f40" Jan 31 17:20:54 crc kubenswrapper[4769]: E0131 17:20:54.886671 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:20:55 crc kubenswrapper[4769]: I0131 17:20:55.445236 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerStarted","Data":"2239d4047e9d3041c7d8ab2c0d24dcd7314b749a9bae24b7bfb0538d88246192"} Jan 31 17:20:55 crc kubenswrapper[4769]: I0131 17:20:55.445529 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 17:20:55 crc kubenswrapper[4769]: I0131 17:20:55.446043 4769 scope.go:117] "RemoveContainer" containerID="20329b69b54456b9125eb1ef9a1aeb204bed6414354d722188fd5d824c019f40" Jan 31 17:20:55 crc kubenswrapper[4769]: E0131 17:20:55.446327 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:20:56 crc kubenswrapper[4769]: I0131 17:20:56.458579 4769 scope.go:117] "RemoveContainer" containerID="20329b69b54456b9125eb1ef9a1aeb204bed6414354d722188fd5d824c019f40" Jan 31 17:20:56 crc kubenswrapper[4769]: E0131 17:20:56.459265 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:20:59 crc kubenswrapper[4769]: I0131 17:20:59.650627 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 17:20:59 crc kubenswrapper[4769]: I0131 17:20:59.709473 4769 scope.go:117] "RemoveContainer" containerID="34043e4c4acd3bb4c77eb869798bcbcf46c6b144268550d7dd4bf295127b4033" Jan 31 17:20:59 crc kubenswrapper[4769]: I0131 17:20:59.709637 4769 scope.go:117] "RemoveContainer" containerID="31c5410734988a34733bf2816d43f274001a7710f20cd41dd446e56fc21fd125" Jan 31 17:20:59 crc kubenswrapper[4769]: 
I0131 17:20:59.709810 4769 scope.go:117] "RemoveContainer" containerID="c62e2433e6dd447fa3a7ea820441d82ce4079d9f21bdd74fac77729e6d1a9e47" Jan 31 17:20:59 crc kubenswrapper[4769]: I0131 17:20:59.709827 4769 scope.go:117] "RemoveContainer" containerID="acd95b5565bae3340b8e173514ccf400cc7303c10cd705067a889467cb1bf9a2" Jan 31 17:20:59 crc kubenswrapper[4769]: I0131 17:20:59.709891 4769 scope.go:117] "RemoveContainer" containerID="e371ba0abd96fed529279e8c987705bc2490becc412646d62670c62aab9f16e2" Jan 31 17:20:59 crc kubenswrapper[4769]: E0131 17:20:59.710390 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=object-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:21:01 crc kubenswrapper[4769]: I0131 17:21:01.646322 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 17:21:01 crc kubenswrapper[4769]: I0131 17:21:01.711015 4769 scope.go:117] "RemoveContainer" containerID="c6269a4105bb6ea59db61469209797c1723266af84a047a8fd15c8ff6b20fe7c" Jan 31 17:21:01 crc kubenswrapper[4769]: I0131 17:21:01.711144 4769 scope.go:117] "RemoveContainer" containerID="bc16d6c35d321e7f226f326ede73d4f7a8c9f8e22dfc09695519261eeb85d0b3" Jan 31 17:21:01 crc kubenswrapper[4769]: I0131 17:21:01.711324 4769 scope.go:117] "RemoveContainer" containerID="d4d0768906439206ffe8cc39e0b86534e43578159e313ff70f760a5885b2534a" Jan 31 17:21:01 crc kubenswrapper[4769]: E0131 17:21:01.712448 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:21:02 crc 
kubenswrapper[4769]: I0131 17:21:02.647929 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 17:21:02 crc kubenswrapper[4769]: I0131 17:21:02.716306 4769 scope.go:117] "RemoveContainer" containerID="93b9536f213a9a8ebff163e44fa064fea71fe72fd3236438bcb9d2c1082f94fd" Jan 31 17:21:02 crc kubenswrapper[4769]: I0131 17:21:02.716402 4769 scope.go:117] "RemoveContainer" containerID="4a808d9f2da7be4edf651d6d424731ea4ceaeed364b4417a065b21c22246d30c" Jan 31 17:21:02 crc kubenswrapper[4769]: I0131 17:21:02.716433 4769 scope.go:117] "RemoveContainer" containerID="6edbc4cca89431b559a2d473c7779b547c8c943dbb854d38e4273a9fd27c649f" Jan 31 17:21:02 crc kubenswrapper[4769]: I0131 17:21:02.716601 4769 scope.go:117] "RemoveContainer" containerID="1d248f00feafbc9afe8eb2636aa7b6e4ee44e68fbe01f11d313641cc945e74d3" Jan 31 17:21:02 crc kubenswrapper[4769]: E0131 17:21:02.717037 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 40s restarting failed container=container-updater pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:21:05 crc kubenswrapper[4769]: I0131 17:21:05.548655 4769 generic.go:334] "Generic (PLEG): container finished" podID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" containerID="bfa98bf34cbf02bf96128bc45d5381ea8373c95f3e5302a55fe40ce8c1cadeb4" exitCode=1 Jan 31 17:21:05 crc kubenswrapper[4769]: I0131 17:21:05.548699 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerDied","Data":"bfa98bf34cbf02bf96128bc45d5381ea8373c95f3e5302a55fe40ce8c1cadeb4"} Jan 31 17:21:05 crc kubenswrapper[4769]: I0131 17:21:05.548984 4769 scope.go:117] "RemoveContainer" containerID="62b924cb2aedb0674029c40a405b05ea5a55bce2923841575330407035bc1267" Jan 31 17:21:05 crc kubenswrapper[4769]: I0131 17:21:05.549857 4769 scope.go:117] "RemoveContainer" containerID="93b9536f213a9a8ebff163e44fa064fea71fe72fd3236438bcb9d2c1082f94fd" Jan 31 17:21:05 crc kubenswrapper[4769]: I0131 17:21:05.549970 4769 scope.go:117] "RemoveContainer" containerID="4a808d9f2da7be4edf651d6d424731ea4ceaeed364b4417a065b21c22246d30c" Jan 31 17:21:05 crc kubenswrapper[4769]: I0131 17:21:05.550017 4769 scope.go:117] "RemoveContainer" containerID="6edbc4cca89431b559a2d473c7779b547c8c943dbb854d38e4273a9fd27c649f" Jan 31 17:21:05 crc kubenswrapper[4769]: I0131 17:21:05.550100 4769 scope.go:117] "RemoveContainer" 
containerID="bfa98bf34cbf02bf96128bc45d5381ea8373c95f3e5302a55fe40ce8c1cadeb4" Jan 31 17:21:05 crc kubenswrapper[4769]: I0131 17:21:05.550141 4769 scope.go:117] "RemoveContainer" containerID="1d248f00feafbc9afe8eb2636aa7b6e4ee44e68fbe01f11d313641cc945e74d3" Jan 31 17:21:05 crc kubenswrapper[4769]: E0131 17:21:05.550710 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 40s restarting failed container=container-updater pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 40s restarting failed container=object-updater pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:21:05 crc kubenswrapper[4769]: I0131 17:21:05.646755 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 17:21:05 crc kubenswrapper[4769]: I0131 17:21:05.646862 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 17:21:05 crc kubenswrapper[4769]: I0131 17:21:05.647648 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="proxy-httpd" containerStatusID={"Type":"cri-o","ID":"2239d4047e9d3041c7d8ab2c0d24dcd7314b749a9bae24b7bfb0538d88246192"} pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" containerMessage="Container proxy-httpd failed liveness probe, will be restarted" Jan 31 17:21:05 crc kubenswrapper[4769]: I0131 17:21:05.647691 4769 scope.go:117] "RemoveContainer" containerID="20329b69b54456b9125eb1ef9a1aeb204bed6414354d722188fd5d824c019f40" Jan 31 17:21:05 crc kubenswrapper[4769]: I0131 17:21:05.647726 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" containerID="cri-o://2239d4047e9d3041c7d8ab2c0d24dcd7314b749a9bae24b7bfb0538d88246192" gracePeriod=30 Jan 31 17:21:05 crc kubenswrapper[4769]: I0131 17:21:05.651032 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 17:21:05 crc kubenswrapper[4769]: I0131 17:21:05.708246 4769 scope.go:117] "RemoveContainer" containerID="4458fcd40801fe3787795388074fefeaa434873f4716c2298183466301cf956f" Jan 31 17:21:05 crc kubenswrapper[4769]: E0131 
17:21:05.708772 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:21:05 crc kubenswrapper[4769]: E0131 17:21:05.931387 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:21:06 crc kubenswrapper[4769]: I0131 17:21:06.560050 4769 generic.go:334] "Generic (PLEG): container finished" podID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerID="2239d4047e9d3041c7d8ab2c0d24dcd7314b749a9bae24b7bfb0538d88246192" exitCode=0 Jan 31 17:21:06 crc kubenswrapper[4769]: I0131 17:21:06.560250 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerDied","Data":"2239d4047e9d3041c7d8ab2c0d24dcd7314b749a9bae24b7bfb0538d88246192"} Jan 31 17:21:06 crc kubenswrapper[4769]: I0131 17:21:06.560372 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerStarted","Data":"d3d906994824dc2b8dba6f0135caee56ad13f546c2022a4bc88e51a1c15a9443"} Jan 31 17:21:06 crc kubenswrapper[4769]: I0131 17:21:06.560393 4769 scope.go:117] "RemoveContainer" containerID="3c8c682bf057b3f3cd818ea1598fbb8e267b962e0187e1fb9693415421d4042c" Jan 31 17:21:06 crc kubenswrapper[4769]: I0131 17:21:06.560879 4769 scope.go:117] "RemoveContainer" containerID="20329b69b54456b9125eb1ef9a1aeb204bed6414354d722188fd5d824c019f40" Jan 31 17:21:06 crc kubenswrapper[4769]: E0131 17:21:06.561024 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:21:06 crc kubenswrapper[4769]: I0131 17:21:06.561159 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 17:21:07 crc kubenswrapper[4769]: I0131 17:21:07.581766 4769 scope.go:117] "RemoveContainer" containerID="20329b69b54456b9125eb1ef9a1aeb204bed6414354d722188fd5d824c019f40" Jan 31 17:21:07 crc kubenswrapper[4769]: E0131 17:21:07.582291 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:21:08 crc kubenswrapper[4769]: I0131 17:21:08.599008 4769 generic.go:334] "Generic (PLEG): container finished" 
podID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" containerID="921f9ad2d900230c5d58ee57d922edeeccea40bfc73f3cb1ac1a70b295b43a54" exitCode=1 Jan 31 17:21:08 crc kubenswrapper[4769]: I0131 17:21:08.599026 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerDied","Data":"921f9ad2d900230c5d58ee57d922edeeccea40bfc73f3cb1ac1a70b295b43a54"} Jan 31 17:21:08 crc kubenswrapper[4769]: I0131 17:21:08.599105 4769 scope.go:117] "RemoveContainer" containerID="b0862079610db9d5f62f67aa455181facb401ffa066642d56a42e08534f14464" Jan 31 17:21:08 crc kubenswrapper[4769]: I0131 17:21:08.600202 4769 scope.go:117] "RemoveContainer" containerID="c6269a4105bb6ea59db61469209797c1723266af84a047a8fd15c8ff6b20fe7c" Jan 31 17:21:08 crc kubenswrapper[4769]: I0131 17:21:08.600354 4769 scope.go:117] "RemoveContainer" containerID="bc16d6c35d321e7f226f326ede73d4f7a8c9f8e22dfc09695519261eeb85d0b3" Jan 31 17:21:08 crc kubenswrapper[4769]: I0131 17:21:08.600413 4769 scope.go:117] "RemoveContainer" containerID="921f9ad2d900230c5d58ee57d922edeeccea40bfc73f3cb1ac1a70b295b43a54" Jan 31 17:21:08 crc kubenswrapper[4769]: I0131 17:21:08.600606 4769 scope.go:117] "RemoveContainer" containerID="d4d0768906439206ffe8cc39e0b86534e43578159e313ff70f760a5885b2534a" Jan 31 17:21:08 crc kubenswrapper[4769]: E0131 17:21:08.601146 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-updater pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:21:11 crc kubenswrapper[4769]: I0131 17:21:11.649154 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 17:21:11 crc kubenswrapper[4769]: I0131 17:21:11.649359 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 17:21:13 crc kubenswrapper[4769]: I0131 17:21:13.708396 4769 scope.go:117] "RemoveContainer" containerID="34043e4c4acd3bb4c77eb869798bcbcf46c6b144268550d7dd4bf295127b4033" Jan 31 17:21:13 crc kubenswrapper[4769]: I0131 17:21:13.708753 4769 scope.go:117] "RemoveContainer" containerID="31c5410734988a34733bf2816d43f274001a7710f20cd41dd446e56fc21fd125" Jan 31 17:21:13 crc kubenswrapper[4769]: I0131 17:21:13.708823 4769 scope.go:117] "RemoveContainer" 
containerID="c62e2433e6dd447fa3a7ea820441d82ce4079d9f21bdd74fac77729e6d1a9e47" Jan 31 17:21:13 crc kubenswrapper[4769]: I0131 17:21:13.708830 4769 scope.go:117] "RemoveContainer" containerID="acd95b5565bae3340b8e173514ccf400cc7303c10cd705067a889467cb1bf9a2" Jan 31 17:21:13 crc kubenswrapper[4769]: I0131 17:21:13.708860 4769 scope.go:117] "RemoveContainer" containerID="e371ba0abd96fed529279e8c987705bc2490becc412646d62670c62aab9f16e2" Jan 31 17:21:13 crc kubenswrapper[4769]: E0131 17:21:13.897283 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:21:14 crc kubenswrapper[4769]: I0131 17:21:14.647012 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 17:21:14 crc kubenswrapper[4769]: I0131 17:21:14.671972 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerStarted","Data":"e7a452392201a600b28e38e04241b9ad1abed5d326346ae4baeb2dcfd3e4514e"} Jan 31 17:21:14 crc kubenswrapper[4769]: I0131 17:21:14.672699 4769 scope.go:117] "RemoveContainer" containerID="34043e4c4acd3bb4c77eb869798bcbcf46c6b144268550d7dd4bf295127b4033" Jan 31 17:21:14 crc kubenswrapper[4769]: I0131 17:21:14.672794 4769 scope.go:117] "RemoveContainer" containerID="31c5410734988a34733bf2816d43f274001a7710f20cd41dd446e56fc21fd125" Jan 31 17:21:14 crc kubenswrapper[4769]: I0131 17:21:14.672907 4769 scope.go:117] "RemoveContainer" containerID="acd95b5565bae3340b8e173514ccf400cc7303c10cd705067a889467cb1bf9a2" Jan 31 17:21:14 crc kubenswrapper[4769]: I0131 17:21:14.672954 4769 scope.go:117] "RemoveContainer" containerID="e371ba0abd96fed529279e8c987705bc2490becc412646d62670c62aab9f16e2" Jan 31 17:21:14 crc kubenswrapper[4769]: E0131 17:21:14.673314 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:21:16 crc kubenswrapper[4769]: I0131 17:21:16.651205 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 17:21:16 crc kubenswrapper[4769]: I0131 17:21:16.707922 4769 scope.go:117] "RemoveContainer" containerID="93b9536f213a9a8ebff163e44fa064fea71fe72fd3236438bcb9d2c1082f94fd" Jan 31 17:21:16 crc kubenswrapper[4769]: I0131 17:21:16.708018 4769 scope.go:117] "RemoveContainer" containerID="4a808d9f2da7be4edf651d6d424731ea4ceaeed364b4417a065b21c22246d30c" Jan 31 17:21:16 crc kubenswrapper[4769]: I0131 17:21:16.708040 4769 scope.go:117] "RemoveContainer" containerID="6edbc4cca89431b559a2d473c7779b547c8c943dbb854d38e4273a9fd27c649f" Jan 31 17:21:16 crc kubenswrapper[4769]: I0131 17:21:16.708084 4769 scope.go:117] "RemoveContainer" containerID="bfa98bf34cbf02bf96128bc45d5381ea8373c95f3e5302a55fe40ce8c1cadeb4" Jan 31 17:21:16 crc kubenswrapper[4769]: I0131 17:21:16.708091 4769 scope.go:117] "RemoveContainer" containerID="1d248f00feafbc9afe8eb2636aa7b6e4ee44e68fbe01f11d313641cc945e74d3" Jan 31 17:21:16 crc kubenswrapper[4769]: E0131 17:21:16.708349 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 40s restarting failed container=container-updater pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 40s restarting failed container=object-updater pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:21:17 crc kubenswrapper[4769]: I0131 17:21:17.647801 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 17:21:17 crc kubenswrapper[4769]: I0131 17:21:17.648235 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 17:21:17 crc kubenswrapper[4769]: I0131 17:21:17.649049 4769 kuberuntime_manager.go:1027] "Message for Container of 
pod" containerName="proxy-httpd" containerStatusID={"Type":"cri-o","ID":"d3d906994824dc2b8dba6f0135caee56ad13f546c2022a4bc88e51a1c15a9443"} pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" containerMessage="Container proxy-httpd failed liveness probe, will be restarted" Jan 31 17:21:17 crc kubenswrapper[4769]: I0131 17:21:17.649085 4769 scope.go:117] "RemoveContainer" containerID="20329b69b54456b9125eb1ef9a1aeb204bed6414354d722188fd5d824c019f40" Jan 31 17:21:17 crc kubenswrapper[4769]: I0131 17:21:17.649119 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" containerID="cri-o://d3d906994824dc2b8dba6f0135caee56ad13f546c2022a4bc88e51a1c15a9443" gracePeriod=30 Jan 31 17:21:17 crc kubenswrapper[4769]: I0131 17:21:17.649629 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 31 17:21:17 crc kubenswrapper[4769]: I0131 17:21:17.708849 4769 scope.go:117] "RemoveContainer" containerID="4458fcd40801fe3787795388074fefeaa434873f4716c2298183466301cf956f" Jan 31 17:21:17 crc kubenswrapper[4769]: E0131 17:21:17.709876 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:21:17 crc kubenswrapper[4769]: E0131 17:21:17.782139 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:21:18 crc kubenswrapper[4769]: I0131 17:21:18.708851 4769 generic.go:334] "Generic (PLEG): container finished" podID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerID="d3d906994824dc2b8dba6f0135caee56ad13f546c2022a4bc88e51a1c15a9443" exitCode=0 Jan 31 17:21:18 crc kubenswrapper[4769]: I0131 17:21:18.718829 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerDied","Data":"d3d906994824dc2b8dba6f0135caee56ad13f546c2022a4bc88e51a1c15a9443"} Jan 31 17:21:18 crc kubenswrapper[4769]: I0131 17:21:18.719121 4769 scope.go:117] "RemoveContainer" containerID="2239d4047e9d3041c7d8ab2c0d24dcd7314b749a9bae24b7bfb0538d88246192" Jan 31 17:21:18 crc kubenswrapper[4769]: I0131 17:21:18.720093 4769 scope.go:117] "RemoveContainer" containerID="d3d906994824dc2b8dba6f0135caee56ad13f546c2022a4bc88e51a1c15a9443" Jan 31 17:21:18 crc kubenswrapper[4769]: I0131 17:21:18.720199 4769 scope.go:117] "RemoveContainer" 
containerID="20329b69b54456b9125eb1ef9a1aeb204bed6414354d722188fd5d824c019f40" Jan 31 17:21:18 crc kubenswrapper[4769]: E0131 17:21:18.721051 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:21:21 crc kubenswrapper[4769]: I0131 17:21:21.709088 4769 scope.go:117] "RemoveContainer" containerID="c6269a4105bb6ea59db61469209797c1723266af84a047a8fd15c8ff6b20fe7c" Jan 31 17:21:21 crc kubenswrapper[4769]: I0131 17:21:21.709711 4769 scope.go:117] "RemoveContainer" containerID="bc16d6c35d321e7f226f326ede73d4f7a8c9f8e22dfc09695519261eeb85d0b3" Jan 31 17:21:21 crc kubenswrapper[4769]: I0131 17:21:21.709778 4769 scope.go:117] "RemoveContainer" containerID="921f9ad2d900230c5d58ee57d922edeeccea40bfc73f3cb1ac1a70b295b43a54" Jan 31 17:21:21 crc kubenswrapper[4769]: I0131 17:21:21.709940 4769 scope.go:117] "RemoveContainer" containerID="d4d0768906439206ffe8cc39e0b86534e43578159e313ff70f760a5885b2534a" Jan 31 17:21:21 crc kubenswrapper[4769]: E0131 17:21:21.710764 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-updater pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:21:27 crc kubenswrapper[4769]: I0131 17:21:27.709387 4769 scope.go:117] "RemoveContainer" containerID="34043e4c4acd3bb4c77eb869798bcbcf46c6b144268550d7dd4bf295127b4033" Jan 31 17:21:27 crc kubenswrapper[4769]: I0131 17:21:27.710077 4769 scope.go:117] "RemoveContainer" containerID="31c5410734988a34733bf2816d43f274001a7710f20cd41dd446e56fc21fd125" Jan 31 17:21:27 crc kubenswrapper[4769]: I0131 17:21:27.710276 4769 scope.go:117] "RemoveContainer" containerID="acd95b5565bae3340b8e173514ccf400cc7303c10cd705067a889467cb1bf9a2" Jan 31 17:21:27 crc kubenswrapper[4769]: I0131 17:21:27.710348 4769 scope.go:117] "RemoveContainer" containerID="e371ba0abd96fed529279e8c987705bc2490becc412646d62670c62aab9f16e2" Jan 31 17:21:27 crc kubenswrapper[4769]: E0131 17:21:27.710893 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator 
pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:21:30 crc kubenswrapper[4769]: I0131 17:21:30.708741 4769 scope.go:117] "RemoveContainer" containerID="d3d906994824dc2b8dba6f0135caee56ad13f546c2022a4bc88e51a1c15a9443" Jan 31 17:21:30 crc kubenswrapper[4769]: I0131 17:21:30.709116 4769 scope.go:117] "RemoveContainer" containerID="20329b69b54456b9125eb1ef9a1aeb204bed6414354d722188fd5d824c019f40" Jan 31 17:21:30 crc kubenswrapper[4769]: E0131 17:21:30.709587 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:21:30 crc kubenswrapper[4769]: I0131 17:21:30.710128 4769 scope.go:117] "RemoveContainer" containerID="93b9536f213a9a8ebff163e44fa064fea71fe72fd3236438bcb9d2c1082f94fd" Jan 31 17:21:30 crc kubenswrapper[4769]: I0131 17:21:30.710290 4769 scope.go:117] "RemoveContainer" containerID="4a808d9f2da7be4edf651d6d424731ea4ceaeed364b4417a065b21c22246d30c" Jan 31 17:21:30 crc kubenswrapper[4769]: I0131 17:21:30.710361 4769 scope.go:117] "RemoveContainer" containerID="6edbc4cca89431b559a2d473c7779b547c8c943dbb854d38e4273a9fd27c649f" Jan 31 17:21:30 crc kubenswrapper[4769]: I0131 17:21:30.710486 4769 scope.go:117] "RemoveContainer" containerID="bfa98bf34cbf02bf96128bc45d5381ea8373c95f3e5302a55fe40ce8c1cadeb4" Jan 31 17:21:30 crc kubenswrapper[4769]: I0131 17:21:30.710549 4769 scope.go:117] "RemoveContainer" containerID="1d248f00feafbc9afe8eb2636aa7b6e4ee44e68fbe01f11d313641cc945e74d3" Jan 31 17:21:30 crc kubenswrapper[4769]: E0131 17:21:30.945079 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 40s restarting failed container=object-updater pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" 
for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:21:31 crc kubenswrapper[4769]: I0131 17:21:31.708657 4769 scope.go:117] "RemoveContainer" containerID="4458fcd40801fe3787795388074fefeaa434873f4716c2298183466301cf956f" Jan 31 17:21:31 crc kubenswrapper[4769]: E0131 17:21:31.709416 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:21:31 crc kubenswrapper[4769]: I0131 17:21:31.851688 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerStarted","Data":"f2ab18d8403d1c91a041e56533da4b639b8b5c29c38ca01b64ebf011146357b0"} Jan 31 17:21:31 crc kubenswrapper[4769]: I0131 17:21:31.852699 4769 scope.go:117] "RemoveContainer" containerID="93b9536f213a9a8ebff163e44fa064fea71fe72fd3236438bcb9d2c1082f94fd" Jan 31 17:21:31 crc kubenswrapper[4769]: I0131 17:21:31.852820 4769 scope.go:117] "RemoveContainer" containerID="4a808d9f2da7be4edf651d6d424731ea4ceaeed364b4417a065b21c22246d30c" Jan 31 17:21:31 crc kubenswrapper[4769]: I0131 17:21:31.852979 4769 scope.go:117] "RemoveContainer" containerID="bfa98bf34cbf02bf96128bc45d5381ea8373c95f3e5302a55fe40ce8c1cadeb4" Jan 31 17:21:31 crc kubenswrapper[4769]: I0131 17:21:31.853001 4769 scope.go:117] "RemoveContainer" containerID="1d248f00feafbc9afe8eb2636aa7b6e4ee44e68fbe01f11d313641cc945e74d3" Jan 31 17:21:31 crc kubenswrapper[4769]: E0131 17:21:31.853602 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 40s restarting failed container=object-updater pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:21:32 crc kubenswrapper[4769]: I0131 17:21:32.869688 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="e7a452392201a600b28e38e04241b9ad1abed5d326346ae4baeb2dcfd3e4514e" exitCode=1 Jan 31 17:21:32 crc kubenswrapper[4769]: I0131 17:21:32.869748 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" 
event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"e7a452392201a600b28e38e04241b9ad1abed5d326346ae4baeb2dcfd3e4514e"} Jan 31 17:21:32 crc kubenswrapper[4769]: I0131 17:21:32.869819 4769 scope.go:117] "RemoveContainer" containerID="c62e2433e6dd447fa3a7ea820441d82ce4079d9f21bdd74fac77729e6d1a9e47" Jan 31 17:21:32 crc kubenswrapper[4769]: I0131 17:21:32.870865 4769 scope.go:117] "RemoveContainer" containerID="34043e4c4acd3bb4c77eb869798bcbcf46c6b144268550d7dd4bf295127b4033" Jan 31 17:21:32 crc kubenswrapper[4769]: I0131 17:21:32.870999 4769 scope.go:117] "RemoveContainer" containerID="31c5410734988a34733bf2816d43f274001a7710f20cd41dd446e56fc21fd125" Jan 31 17:21:32 crc kubenswrapper[4769]: I0131 17:21:32.871169 4769 scope.go:117] "RemoveContainer" containerID="e7a452392201a600b28e38e04241b9ad1abed5d326346ae4baeb2dcfd3e4514e" Jan 31 17:21:32 crc kubenswrapper[4769]: I0131 17:21:32.871215 4769 scope.go:117] "RemoveContainer" containerID="acd95b5565bae3340b8e173514ccf400cc7303c10cd705067a889467cb1bf9a2" Jan 31 17:21:32 crc kubenswrapper[4769]: I0131 17:21:32.871287 4769 scope.go:117] "RemoveContainer" containerID="e371ba0abd96fed529279e8c987705bc2490becc412646d62670c62aab9f16e2" Jan 31 17:21:32 crc kubenswrapper[4769]: E0131 17:21:32.871864 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:21:36 crc kubenswrapper[4769]: I0131 17:21:36.709028 4769 scope.go:117] "RemoveContainer" containerID="c6269a4105bb6ea59db61469209797c1723266af84a047a8fd15c8ff6b20fe7c" Jan 31 17:21:36 crc kubenswrapper[4769]: I0131 17:21:36.709458 4769 scope.go:117] "RemoveContainer" containerID="bc16d6c35d321e7f226f326ede73d4f7a8c9f8e22dfc09695519261eeb85d0b3" Jan 31 17:21:36 crc kubenswrapper[4769]: I0131 17:21:36.709488 4769 scope.go:117] "RemoveContainer" containerID="921f9ad2d900230c5d58ee57d922edeeccea40bfc73f3cb1ac1a70b295b43a54" Jan 31 17:21:36 crc kubenswrapper[4769]: I0131 17:21:36.709584 4769 scope.go:117] "RemoveContainer" containerID="d4d0768906439206ffe8cc39e0b86534e43578159e313ff70f760a5885b2534a" Jan 31 17:21:36 crc kubenswrapper[4769]: E0131 17:21:36.709963 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator 
pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-updater pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:21:36 crc kubenswrapper[4769]: I0131 17:21:36.847673 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices\") pod \"swift-ring-rebalance-2sjs2\" (UID: \"54c0116b-a027-4f11-8b6b-aa00778f1acb\") " pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 17:21:36 crc kubenswrapper[4769]: E0131 17:21:36.847864 4769 configmap.go:193] Couldn't get configMap swift-kuttl-tests/swift-ring-config-data: configmap "swift-ring-config-data" not found Jan 31 17:21:36 crc kubenswrapper[4769]: E0131 17:21:36.848154 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices podName:54c0116b-a027-4f11-8b6b-aa00778f1acb nodeName:}" failed. No retries permitted until 2026-01-31 17:23:38.848124841 +0000 UTC m=+3266.922293550 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices") pod "swift-ring-rebalance-2sjs2" (UID: "54c0116b-a027-4f11-8b6b-aa00778f1acb") : configmap "swift-ring-config-data" not found Jan 31 17:21:43 crc kubenswrapper[4769]: I0131 17:21:43.708526 4769 scope.go:117] "RemoveContainer" containerID="4458fcd40801fe3787795388074fefeaa434873f4716c2298183466301cf956f" Jan 31 17:21:43 crc kubenswrapper[4769]: E0131 17:21:43.709309 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:21:45 crc kubenswrapper[4769]: I0131 17:21:45.708095 4769 scope.go:117] "RemoveContainer" containerID="d3d906994824dc2b8dba6f0135caee56ad13f546c2022a4bc88e51a1c15a9443" Jan 31 17:21:45 crc kubenswrapper[4769]: I0131 17:21:45.708933 4769 scope.go:117] "RemoveContainer" containerID="20329b69b54456b9125eb1ef9a1aeb204bed6414354d722188fd5d824c019f40" Jan 31 17:21:45 crc kubenswrapper[4769]: I0131 17:21:45.709049 4769 scope.go:117] "RemoveContainer" containerID="34043e4c4acd3bb4c77eb869798bcbcf46c6b144268550d7dd4bf295127b4033" Jan 31 17:21:45 crc kubenswrapper[4769]: I0131 17:21:45.709134 4769 scope.go:117] "RemoveContainer" containerID="31c5410734988a34733bf2816d43f274001a7710f20cd41dd446e56fc21fd125" Jan 31 17:21:45 crc kubenswrapper[4769]: I0131 17:21:45.709179 4769 scope.go:117] "RemoveContainer" containerID="93b9536f213a9a8ebff163e44fa064fea71fe72fd3236438bcb9d2c1082f94fd" Jan 31 17:21:45 crc kubenswrapper[4769]: I0131 17:21:45.709226 4769 scope.go:117] "RemoveContainer" containerID="e7a452392201a600b28e38e04241b9ad1abed5d326346ae4baeb2dcfd3e4514e" Jan 31 17:21:45 crc kubenswrapper[4769]: I0131 17:21:45.709235 4769 scope.go:117] "RemoveContainer" containerID="acd95b5565bae3340b8e173514ccf400cc7303c10cd705067a889467cb1bf9a2" Jan 31 17:21:45 crc kubenswrapper[4769]: I0131 17:21:45.709275 4769 scope.go:117] "RemoveContainer" containerID="e371ba0abd96fed529279e8c987705bc2490becc412646d62670c62aab9f16e2" Jan 31 17:21:45 crc kubenswrapper[4769]: I0131 17:21:45.709301 4769 scope.go:117] "RemoveContainer" containerID="4a808d9f2da7be4edf651d6d424731ea4ceaeed364b4417a065b21c22246d30c" Jan 31 17:21:45 crc kubenswrapper[4769]: I0131 17:21:45.709484 4769 scope.go:117] "RemoveContainer" containerID="bfa98bf34cbf02bf96128bc45d5381ea8373c95f3e5302a55fe40ce8c1cadeb4" Jan 31 17:21:45 crc kubenswrapper[4769]: I0131 17:21:45.709529 4769 scope.go:117] "RemoveContainer" containerID="1d248f00feafbc9afe8eb2636aa7b6e4ee44e68fbe01f11d313641cc945e74d3" Jan 31 17:21:45 crc kubenswrapper[4769]: E0131 17:21:45.709677 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-updater\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:21:45 crc kubenswrapper[4769]: E0131 17:21:45.710263 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:21:45 crc kubenswrapper[4769]: E0131 17:21:45.921182 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:21:46 crc kubenswrapper[4769]: I0131 17:21:46.006896 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerStarted","Data":"12199fda864950960e24cc0826b5adb7a3593c88facc340f930d7288077db2d2"} Jan 31 17:21:46 crc kubenswrapper[4769]: I0131 17:21:46.007570 4769 scope.go:117] "RemoveContainer" containerID="93b9536f213a9a8ebff163e44fa064fea71fe72fd3236438bcb9d2c1082f94fd" Jan 31 17:21:46 crc kubenswrapper[4769]: I0131 17:21:46.007629 4769 scope.go:117] "RemoveContainer" containerID="4a808d9f2da7be4edf651d6d424731ea4ceaeed364b4417a065b21c22246d30c" Jan 31 17:21:46 crc kubenswrapper[4769]: I0131 17:21:46.007725 4769 scope.go:117] "RemoveContainer" containerID="1d248f00feafbc9afe8eb2636aa7b6e4ee44e68fbe01f11d313641cc945e74d3" Jan 31 17:21:46 crc kubenswrapper[4769]: E0131 17:21:46.008046 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator 
pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:21:48 crc kubenswrapper[4769]: I0131 17:21:48.708763 4769 scope.go:117] "RemoveContainer" containerID="c6269a4105bb6ea59db61469209797c1723266af84a047a8fd15c8ff6b20fe7c" Jan 31 17:21:48 crc kubenswrapper[4769]: I0131 17:21:48.709693 4769 scope.go:117] "RemoveContainer" containerID="bc16d6c35d321e7f226f326ede73d4f7a8c9f8e22dfc09695519261eeb85d0b3" Jan 31 17:21:48 crc kubenswrapper[4769]: I0131 17:21:48.709753 4769 scope.go:117] "RemoveContainer" containerID="921f9ad2d900230c5d58ee57d922edeeccea40bfc73f3cb1ac1a70b295b43a54" Jan 31 17:21:48 crc kubenswrapper[4769]: I0131 17:21:48.709917 4769 scope.go:117] "RemoveContainer" containerID="d4d0768906439206ffe8cc39e0b86534e43578159e313ff70f760a5885b2534a" Jan 31 17:21:48 crc kubenswrapper[4769]: E0131 17:21:48.710486 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-updater pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:21:51 crc kubenswrapper[4769]: E0131 17:21:51.784492 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ring-data-devices], unattached volumes=[], failed to process volumes=[]: context deadline exceeded" pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" podUID="54c0116b-a027-4f11-8b6b-aa00778f1acb" Jan 31 17:21:52 crc kubenswrapper[4769]: I0131 17:21:52.059950 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 17:21:57 crc kubenswrapper[4769]: I0131 17:21:57.708340 4769 scope.go:117] "RemoveContainer" containerID="4458fcd40801fe3787795388074fefeaa434873f4716c2298183466301cf956f" Jan 31 17:21:57 crc kubenswrapper[4769]: E0131 17:21:57.709014 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:21:57 crc kubenswrapper[4769]: I0131 17:21:57.709521 4769 scope.go:117] "RemoveContainer" containerID="d3d906994824dc2b8dba6f0135caee56ad13f546c2022a4bc88e51a1c15a9443" Jan 31 17:21:57 crc kubenswrapper[4769]: I0131 17:21:57.709538 4769 scope.go:117] "RemoveContainer" containerID="20329b69b54456b9125eb1ef9a1aeb204bed6414354d722188fd5d824c019f40" Jan 31 17:21:57 crc kubenswrapper[4769]: I0131 17:21:57.709591 4769 scope.go:117] "RemoveContainer" containerID="93b9536f213a9a8ebff163e44fa064fea71fe72fd3236438bcb9d2c1082f94fd" Jan 31 17:21:57 crc kubenswrapper[4769]: I0131 17:21:57.709657 4769 scope.go:117] "RemoveContainer" containerID="4a808d9f2da7be4edf651d6d424731ea4ceaeed364b4417a065b21c22246d30c" Jan 31 17:21:57 crc kubenswrapper[4769]: E0131 17:21:57.709720 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:21:57 crc kubenswrapper[4769]: I0131 17:21:57.709745 4769 scope.go:117] "RemoveContainer" containerID="1d248f00feafbc9afe8eb2636aa7b6e4ee44e68fbe01f11d313641cc945e74d3" Jan 31 17:21:57 crc kubenswrapper[4769]: E0131 17:21:57.710014 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:21:59 crc kubenswrapper[4769]: I0131 17:21:59.709173 4769 scope.go:117] "RemoveContainer" containerID="34043e4c4acd3bb4c77eb869798bcbcf46c6b144268550d7dd4bf295127b4033" Jan 31 17:21:59 crc kubenswrapper[4769]: I0131 17:21:59.709331 4769 scope.go:117] "RemoveContainer" containerID="31c5410734988a34733bf2816d43f274001a7710f20cd41dd446e56fc21fd125" Jan 31 17:21:59 
crc kubenswrapper[4769]: I0131 17:21:59.709536 4769 scope.go:117] "RemoveContainer" containerID="e7a452392201a600b28e38e04241b9ad1abed5d326346ae4baeb2dcfd3e4514e" Jan 31 17:21:59 crc kubenswrapper[4769]: I0131 17:21:59.709552 4769 scope.go:117] "RemoveContainer" containerID="acd95b5565bae3340b8e173514ccf400cc7303c10cd705067a889467cb1bf9a2" Jan 31 17:21:59 crc kubenswrapper[4769]: I0131 17:21:59.709625 4769 scope.go:117] "RemoveContainer" containerID="e371ba0abd96fed529279e8c987705bc2490becc412646d62670c62aab9f16e2" Jan 31 17:21:59 crc kubenswrapper[4769]: E0131 17:21:59.710250 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:22:01 crc kubenswrapper[4769]: I0131 17:22:01.709876 4769 scope.go:117] "RemoveContainer" containerID="c6269a4105bb6ea59db61469209797c1723266af84a047a8fd15c8ff6b20fe7c" Jan 31 17:22:01 crc kubenswrapper[4769]: I0131 17:22:01.710022 4769 scope.go:117] "RemoveContainer" containerID="bc16d6c35d321e7f226f326ede73d4f7a8c9f8e22dfc09695519261eeb85d0b3" Jan 31 17:22:01 crc kubenswrapper[4769]: I0131 17:22:01.710073 4769 scope.go:117] "RemoveContainer" containerID="921f9ad2d900230c5d58ee57d922edeeccea40bfc73f3cb1ac1a70b295b43a54" Jan 31 17:22:01 crc kubenswrapper[4769]: I0131 17:22:01.710233 4769 scope.go:117] "RemoveContainer" containerID="d4d0768906439206ffe8cc39e0b86534e43578159e313ff70f760a5885b2534a" Jan 31 17:22:01 crc kubenswrapper[4769]: E0131 17:22:01.710884 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-updater pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" 
pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:22:08 crc kubenswrapper[4769]: I0131 17:22:08.708625 4769 scope.go:117] "RemoveContainer" containerID="4458fcd40801fe3787795388074fefeaa434873f4716c2298183466301cf956f" Jan 31 17:22:08 crc kubenswrapper[4769]: E0131 17:22:08.710349 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:22:11 crc kubenswrapper[4769]: I0131 17:22:11.248300 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="0305291a9f97d723baf7ea6865f351f37d4c15692a2e5e718215bdfd3ee0b7e5" exitCode=1 Jan 31 17:22:11 crc kubenswrapper[4769]: I0131 17:22:11.248365 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"0305291a9f97d723baf7ea6865f351f37d4c15692a2e5e718215bdfd3ee0b7e5"} Jan 31 17:22:11 crc kubenswrapper[4769]: I0131 17:22:11.249980 4769 scope.go:117] "RemoveContainer" containerID="85f1f039507b8b306801c6bb4ff81c81e24ef3782c3aa1af52cb2accc8cbf579" Jan 31 17:22:11 crc kubenswrapper[4769]: I0131 17:22:11.252579 4769 scope.go:117] "RemoveContainer" containerID="34043e4c4acd3bb4c77eb869798bcbcf46c6b144268550d7dd4bf295127b4033" Jan 31 17:22:11 crc kubenswrapper[4769]: I0131 17:22:11.252660 4769 scope.go:117] "RemoveContainer" containerID="31c5410734988a34733bf2816d43f274001a7710f20cd41dd446e56fc21fd125" Jan 31 17:22:11 crc kubenswrapper[4769]: I0131 17:22:11.252689 4769 scope.go:117] "RemoveContainer" containerID="0305291a9f97d723baf7ea6865f351f37d4c15692a2e5e718215bdfd3ee0b7e5" Jan 31 17:22:11 crc kubenswrapper[4769]: I0131 17:22:11.252769 4769 scope.go:117] "RemoveContainer" containerID="e7a452392201a600b28e38e04241b9ad1abed5d326346ae4baeb2dcfd3e4514e" Jan 31 17:22:11 crc kubenswrapper[4769]: I0131 17:22:11.252778 4769 scope.go:117] "RemoveContainer" containerID="acd95b5565bae3340b8e173514ccf400cc7303c10cd705067a889467cb1bf9a2" Jan 31 17:22:11 crc kubenswrapper[4769]: I0131 17:22:11.252820 4769 scope.go:117] "RemoveContainer" containerID="e371ba0abd96fed529279e8c987705bc2490becc412646d62670c62aab9f16e2" Jan 31 17:22:11 crc kubenswrapper[4769]: E0131 17:22:11.260100 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-updater 
pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:22:11 crc kubenswrapper[4769]: I0131 17:22:11.708621 4769 scope.go:117] "RemoveContainer" containerID="d3d906994824dc2b8dba6f0135caee56ad13f546c2022a4bc88e51a1c15a9443" Jan 31 17:22:11 crc kubenswrapper[4769]: I0131 17:22:11.708651 4769 scope.go:117] "RemoveContainer" containerID="20329b69b54456b9125eb1ef9a1aeb204bed6414354d722188fd5d824c019f40" Jan 31 17:22:11 crc kubenswrapper[4769]: E0131 17:22:11.708915 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:22:11 crc kubenswrapper[4769]: I0131 17:22:11.709020 4769 scope.go:117] "RemoveContainer" containerID="93b9536f213a9a8ebff163e44fa064fea71fe72fd3236438bcb9d2c1082f94fd" Jan 31 17:22:11 crc kubenswrapper[4769]: I0131 17:22:11.709161 4769 scope.go:117] "RemoveContainer" containerID="4a808d9f2da7be4edf651d6d424731ea4ceaeed364b4417a065b21c22246d30c" Jan 31 17:22:11 crc kubenswrapper[4769]: I0131 17:22:11.709289 4769 scope.go:117] "RemoveContainer" containerID="1d248f00feafbc9afe8eb2636aa7b6e4ee44e68fbe01f11d313641cc945e74d3" Jan 31 17:22:11 crc kubenswrapper[4769]: E0131 17:22:11.709682 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:22:13 crc kubenswrapper[4769]: I0131 17:22:13.708030 4769 scope.go:117] "RemoveContainer" containerID="c6269a4105bb6ea59db61469209797c1723266af84a047a8fd15c8ff6b20fe7c" Jan 31 17:22:13 crc kubenswrapper[4769]: I0131 17:22:13.708141 4769 scope.go:117] "RemoveContainer" containerID="bc16d6c35d321e7f226f326ede73d4f7a8c9f8e22dfc09695519261eeb85d0b3" Jan 31 17:22:13 crc kubenswrapper[4769]: I0131 17:22:13.708181 4769 scope.go:117] "RemoveContainer" 
containerID="921f9ad2d900230c5d58ee57d922edeeccea40bfc73f3cb1ac1a70b295b43a54" Jan 31 17:22:13 crc kubenswrapper[4769]: I0131 17:22:13.708283 4769 scope.go:117] "RemoveContainer" containerID="d4d0768906439206ffe8cc39e0b86534e43578159e313ff70f760a5885b2534a" Jan 31 17:22:13 crc kubenswrapper[4769]: E0131 17:22:13.708732 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-updater pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:22:22 crc kubenswrapper[4769]: I0131 17:22:22.712921 4769 scope.go:117] "RemoveContainer" containerID="4458fcd40801fe3787795388074fefeaa434873f4716c2298183466301cf956f" Jan 31 17:22:22 crc kubenswrapper[4769]: E0131 17:22:22.713792 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:22:23 crc kubenswrapper[4769]: I0131 17:22:23.708849 4769 scope.go:117] "RemoveContainer" containerID="93b9536f213a9a8ebff163e44fa064fea71fe72fd3236438bcb9d2c1082f94fd" Jan 31 17:22:23 crc kubenswrapper[4769]: I0131 17:22:23.709233 4769 scope.go:117] "RemoveContainer" containerID="4a808d9f2da7be4edf651d6d424731ea4ceaeed364b4417a065b21c22246d30c" Jan 31 17:22:23 crc kubenswrapper[4769]: I0131 17:22:23.709350 4769 scope.go:117] "RemoveContainer" containerID="1d248f00feafbc9afe8eb2636aa7b6e4ee44e68fbe01f11d313641cc945e74d3" Jan 31 17:22:23 crc kubenswrapper[4769]: E0131 17:22:23.709848 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:22:24 crc kubenswrapper[4769]: I0131 17:22:24.709820 4769 scope.go:117] "RemoveContainer" 
containerID="34043e4c4acd3bb4c77eb869798bcbcf46c6b144268550d7dd4bf295127b4033" Jan 31 17:22:24 crc kubenswrapper[4769]: I0131 17:22:24.709989 4769 scope.go:117] "RemoveContainer" containerID="31c5410734988a34733bf2816d43f274001a7710f20cd41dd446e56fc21fd125" Jan 31 17:22:24 crc kubenswrapper[4769]: I0131 17:22:24.710046 4769 scope.go:117] "RemoveContainer" containerID="0305291a9f97d723baf7ea6865f351f37d4c15692a2e5e718215bdfd3ee0b7e5" Jan 31 17:22:24 crc kubenswrapper[4769]: I0131 17:22:24.710161 4769 scope.go:117] "RemoveContainer" containerID="e7a452392201a600b28e38e04241b9ad1abed5d326346ae4baeb2dcfd3e4514e" Jan 31 17:22:24 crc kubenswrapper[4769]: I0131 17:22:24.710178 4769 scope.go:117] "RemoveContainer" containerID="acd95b5565bae3340b8e173514ccf400cc7303c10cd705067a889467cb1bf9a2" Jan 31 17:22:24 crc kubenswrapper[4769]: I0131 17:22:24.710259 4769 scope.go:117] "RemoveContainer" containerID="e371ba0abd96fed529279e8c987705bc2490becc412646d62670c62aab9f16e2" Jan 31 17:22:24 crc kubenswrapper[4769]: E0131 17:22:24.711165 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:22:25 crc kubenswrapper[4769]: I0131 17:22:25.709212 4769 scope.go:117] "RemoveContainer" containerID="d3d906994824dc2b8dba6f0135caee56ad13f546c2022a4bc88e51a1c15a9443" Jan 31 17:22:25 crc kubenswrapper[4769]: I0131 17:22:25.709259 4769 scope.go:117] "RemoveContainer" containerID="20329b69b54456b9125eb1ef9a1aeb204bed6414354d722188fd5d824c019f40" Jan 31 17:22:25 crc kubenswrapper[4769]: E0131 17:22:25.709649 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:22:28 crc 
kubenswrapper[4769]: I0131 17:22:28.709099 4769 scope.go:117] "RemoveContainer" containerID="c6269a4105bb6ea59db61469209797c1723266af84a047a8fd15c8ff6b20fe7c" Jan 31 17:22:28 crc kubenswrapper[4769]: I0131 17:22:28.709425 4769 scope.go:117] "RemoveContainer" containerID="bc16d6c35d321e7f226f326ede73d4f7a8c9f8e22dfc09695519261eeb85d0b3" Jan 31 17:22:28 crc kubenswrapper[4769]: I0131 17:22:28.709457 4769 scope.go:117] "RemoveContainer" containerID="921f9ad2d900230c5d58ee57d922edeeccea40bfc73f3cb1ac1a70b295b43a54" Jan 31 17:22:28 crc kubenswrapper[4769]: I0131 17:22:28.709560 4769 scope.go:117] "RemoveContainer" containerID="d4d0768906439206ffe8cc39e0b86534e43578159e313ff70f760a5885b2534a" Jan 31 17:22:28 crc kubenswrapper[4769]: E0131 17:22:28.868113 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:22:29 crc kubenswrapper[4769]: I0131 17:22:29.449112 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerStarted","Data":"9707c76fede02da2f5a5297c9d3af6e443c3362d8678066c35f22c92d4c569c0"} Jan 31 17:22:29 crc kubenswrapper[4769]: I0131 17:22:29.451776 4769 scope.go:117] "RemoveContainer" containerID="c6269a4105bb6ea59db61469209797c1723266af84a047a8fd15c8ff6b20fe7c" Jan 31 17:22:29 crc kubenswrapper[4769]: I0131 17:22:29.452151 4769 scope.go:117] "RemoveContainer" containerID="bc16d6c35d321e7f226f326ede73d4f7a8c9f8e22dfc09695519261eeb85d0b3" Jan 31 17:22:29 crc kubenswrapper[4769]: I0131 17:22:29.452485 4769 scope.go:117] "RemoveContainer" containerID="d4d0768906439206ffe8cc39e0b86534e43578159e313ff70f760a5885b2534a" Jan 31 17:22:29 crc kubenswrapper[4769]: E0131 17:22:29.453712 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:22:33 crc kubenswrapper[4769]: I0131 17:22:33.708525 4769 scope.go:117] "RemoveContainer" containerID="4458fcd40801fe3787795388074fefeaa434873f4716c2298183466301cf956f" Jan 31 17:22:33 crc kubenswrapper[4769]: E0131 17:22:33.709162 4769 pod_workers.go:1301] 
"Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:22:34 crc kubenswrapper[4769]: I0131 17:22:34.711200 4769 scope.go:117] "RemoveContainer" containerID="93b9536f213a9a8ebff163e44fa064fea71fe72fd3236438bcb9d2c1082f94fd" Jan 31 17:22:34 crc kubenswrapper[4769]: I0131 17:22:34.711292 4769 scope.go:117] "RemoveContainer" containerID="4a808d9f2da7be4edf651d6d424731ea4ceaeed364b4417a065b21c22246d30c" Jan 31 17:22:34 crc kubenswrapper[4769]: I0131 17:22:34.711623 4769 scope.go:117] "RemoveContainer" containerID="1d248f00feafbc9afe8eb2636aa7b6e4ee44e68fbe01f11d313641cc945e74d3" Jan 31 17:22:34 crc kubenswrapper[4769]: E0131 17:22:34.712055 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:22:36 crc kubenswrapper[4769]: I0131 17:22:36.707924 4769 scope.go:117] "RemoveContainer" containerID="d3d906994824dc2b8dba6f0135caee56ad13f546c2022a4bc88e51a1c15a9443" Jan 31 17:22:36 crc kubenswrapper[4769]: I0131 17:22:36.707953 4769 scope.go:117] "RemoveContainer" containerID="20329b69b54456b9125eb1ef9a1aeb204bed6414354d722188fd5d824c019f40" Jan 31 17:22:36 crc kubenswrapper[4769]: E0131 17:22:36.708132 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:22:36 crc kubenswrapper[4769]: I0131 17:22:36.708682 4769 scope.go:117] "RemoveContainer" containerID="34043e4c4acd3bb4c77eb869798bcbcf46c6b144268550d7dd4bf295127b4033" Jan 31 17:22:36 crc kubenswrapper[4769]: I0131 17:22:36.708762 4769 scope.go:117] "RemoveContainer" containerID="31c5410734988a34733bf2816d43f274001a7710f20cd41dd446e56fc21fd125" Jan 31 17:22:36 crc kubenswrapper[4769]: I0131 17:22:36.708789 4769 scope.go:117] "RemoveContainer" containerID="0305291a9f97d723baf7ea6865f351f37d4c15692a2e5e718215bdfd3ee0b7e5" Jan 31 17:22:36 crc kubenswrapper[4769]: I0131 17:22:36.708846 4769 scope.go:117] "RemoveContainer" containerID="e7a452392201a600b28e38e04241b9ad1abed5d326346ae4baeb2dcfd3e4514e" 
Jan 31 17:22:36 crc kubenswrapper[4769]: I0131 17:22:36.708855 4769 scope.go:117] "RemoveContainer" containerID="acd95b5565bae3340b8e173514ccf400cc7303c10cd705067a889467cb1bf9a2" Jan 31 17:22:36 crc kubenswrapper[4769]: I0131 17:22:36.708895 4769 scope.go:117] "RemoveContainer" containerID="e371ba0abd96fed529279e8c987705bc2490becc412646d62670c62aab9f16e2" Jan 31 17:22:36 crc kubenswrapper[4769]: E0131 17:22:36.709319 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:22:40 crc kubenswrapper[4769]: I0131 17:22:40.708717 4769 scope.go:117] "RemoveContainer" containerID="c6269a4105bb6ea59db61469209797c1723266af84a047a8fd15c8ff6b20fe7c" Jan 31 17:22:40 crc kubenswrapper[4769]: I0131 17:22:40.708859 4769 scope.go:117] "RemoveContainer" containerID="bc16d6c35d321e7f226f326ede73d4f7a8c9f8e22dfc09695519261eeb85d0b3" Jan 31 17:22:40 crc kubenswrapper[4769]: I0131 17:22:40.709054 4769 scope.go:117] "RemoveContainer" containerID="d4d0768906439206ffe8cc39e0b86534e43578159e313ff70f760a5885b2534a" Jan 31 17:22:40 crc kubenswrapper[4769]: E0131 17:22:40.709746 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:22:45 crc kubenswrapper[4769]: I0131 17:22:45.708582 4769 scope.go:117] "RemoveContainer" containerID="4458fcd40801fe3787795388074fefeaa434873f4716c2298183466301cf956f" Jan 31 17:22:45 crc kubenswrapper[4769]: E0131 17:22:45.709272 4769 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:22:47 crc kubenswrapper[4769]: I0131 17:22:47.708740 4769 scope.go:117] "RemoveContainer" containerID="d3d906994824dc2b8dba6f0135caee56ad13f546c2022a4bc88e51a1c15a9443" Jan 31 17:22:47 crc kubenswrapper[4769]: I0131 17:22:47.709079 4769 scope.go:117] "RemoveContainer" containerID="20329b69b54456b9125eb1ef9a1aeb204bed6414354d722188fd5d824c019f40" Jan 31 17:22:47 crc kubenswrapper[4769]: I0131 17:22:47.709361 4769 scope.go:117] "RemoveContainer" containerID="93b9536f213a9a8ebff163e44fa064fea71fe72fd3236438bcb9d2c1082f94fd" Jan 31 17:22:47 crc kubenswrapper[4769]: I0131 17:22:47.709485 4769 scope.go:117] "RemoveContainer" containerID="4a808d9f2da7be4edf651d6d424731ea4ceaeed364b4417a065b21c22246d30c" Jan 31 17:22:47 crc kubenswrapper[4769]: E0131 17:22:47.709475 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:22:47 crc kubenswrapper[4769]: I0131 17:22:47.709696 4769 scope.go:117] "RemoveContainer" containerID="1d248f00feafbc9afe8eb2636aa7b6e4ee44e68fbe01f11d313641cc945e74d3" Jan 31 17:22:47 crc kubenswrapper[4769]: E0131 17:22:47.710275 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:22:48 crc kubenswrapper[4769]: I0131 17:22:48.708016 4769 scope.go:117] "RemoveContainer" containerID="34043e4c4acd3bb4c77eb869798bcbcf46c6b144268550d7dd4bf295127b4033" Jan 31 17:22:48 crc kubenswrapper[4769]: I0131 17:22:48.708082 4769 scope.go:117] "RemoveContainer" containerID="31c5410734988a34733bf2816d43f274001a7710f20cd41dd446e56fc21fd125" Jan 31 17:22:48 crc kubenswrapper[4769]: I0131 17:22:48.708102 4769 scope.go:117] "RemoveContainer" containerID="0305291a9f97d723baf7ea6865f351f37d4c15692a2e5e718215bdfd3ee0b7e5" Jan 31 17:22:48 crc kubenswrapper[4769]: I0131 17:22:48.708159 4769 scope.go:117] "RemoveContainer" 
containerID="e7a452392201a600b28e38e04241b9ad1abed5d326346ae4baeb2dcfd3e4514e" Jan 31 17:22:48 crc kubenswrapper[4769]: I0131 17:22:48.708165 4769 scope.go:117] "RemoveContainer" containerID="acd95b5565bae3340b8e173514ccf400cc7303c10cd705067a889467cb1bf9a2" Jan 31 17:22:48 crc kubenswrapper[4769]: I0131 17:22:48.708196 4769 scope.go:117] "RemoveContainer" containerID="e371ba0abd96fed529279e8c987705bc2490becc412646d62670c62aab9f16e2" Jan 31 17:22:48 crc kubenswrapper[4769]: E0131 17:22:48.708546 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:22:51 crc kubenswrapper[4769]: I0131 17:22:51.651091 4769 generic.go:334] "Generic (PLEG): container finished" podID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" containerID="f2ab18d8403d1c91a041e56533da4b639b8b5c29c38ca01b64ebf011146357b0" exitCode=1 Jan 31 17:22:51 crc kubenswrapper[4769]: I0131 17:22:51.651213 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerDied","Data":"f2ab18d8403d1c91a041e56533da4b639b8b5c29c38ca01b64ebf011146357b0"} Jan 31 17:22:51 crc kubenswrapper[4769]: I0131 17:22:51.651668 4769 scope.go:117] "RemoveContainer" containerID="6edbc4cca89431b559a2d473c7779b547c8c943dbb854d38e4273a9fd27c649f" Jan 31 17:22:51 crc kubenswrapper[4769]: I0131 17:22:51.653406 4769 scope.go:117] "RemoveContainer" containerID="93b9536f213a9a8ebff163e44fa064fea71fe72fd3236438bcb9d2c1082f94fd" Jan 31 17:22:51 crc kubenswrapper[4769]: I0131 17:22:51.653597 4769 scope.go:117] "RemoveContainer" containerID="4a808d9f2da7be4edf651d6d424731ea4ceaeed364b4417a065b21c22246d30c" Jan 31 17:22:51 crc kubenswrapper[4769]: I0131 17:22:51.653667 4769 scope.go:117] "RemoveContainer" containerID="f2ab18d8403d1c91a041e56533da4b639b8b5c29c38ca01b64ebf011146357b0" Jan 31 17:22:51 crc kubenswrapper[4769]: I0131 17:22:51.653854 4769 scope.go:117] "RemoveContainer" containerID="1d248f00feafbc9afe8eb2636aa7b6e4ee44e68fbe01f11d313641cc945e74d3" Jan 31 17:22:51 crc kubenswrapper[4769]: E0131 17:22:51.654554 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to 
\"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-updater pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:22:52 crc kubenswrapper[4769]: I0131 17:22:52.712425 4769 scope.go:117] "RemoveContainer" containerID="c6269a4105bb6ea59db61469209797c1723266af84a047a8fd15c8ff6b20fe7c" Jan 31 17:22:52 crc kubenswrapper[4769]: I0131 17:22:52.712796 4769 scope.go:117] "RemoveContainer" containerID="bc16d6c35d321e7f226f326ede73d4f7a8c9f8e22dfc09695519261eeb85d0b3" Jan 31 17:22:52 crc kubenswrapper[4769]: I0131 17:22:52.712922 4769 scope.go:117] "RemoveContainer" containerID="d4d0768906439206ffe8cc39e0b86534e43578159e313ff70f760a5885b2534a" Jan 31 17:22:52 crc kubenswrapper[4769]: E0131 17:22:52.713275 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:22:58 crc kubenswrapper[4769]: I0131 17:22:58.709602 4769 scope.go:117] "RemoveContainer" containerID="4458fcd40801fe3787795388074fefeaa434873f4716c2298183466301cf956f" Jan 31 17:22:58 crc kubenswrapper[4769]: E0131 17:22:58.710156 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:22:59 crc kubenswrapper[4769]: I0131 17:22:59.709277 4769 scope.go:117] "RemoveContainer" containerID="d3d906994824dc2b8dba6f0135caee56ad13f546c2022a4bc88e51a1c15a9443" Jan 31 17:22:59 crc kubenswrapper[4769]: I0131 17:22:59.709323 4769 scope.go:117] "RemoveContainer" containerID="20329b69b54456b9125eb1ef9a1aeb204bed6414354d722188fd5d824c019f40" Jan 31 17:22:59 crc kubenswrapper[4769]: I0131 17:22:59.709366 4769 scope.go:117] "RemoveContainer" 
containerID="34043e4c4acd3bb4c77eb869798bcbcf46c6b144268550d7dd4bf295127b4033" Jan 31 17:22:59 crc kubenswrapper[4769]: I0131 17:22:59.709481 4769 scope.go:117] "RemoveContainer" containerID="31c5410734988a34733bf2816d43f274001a7710f20cd41dd446e56fc21fd125" Jan 31 17:22:59 crc kubenswrapper[4769]: I0131 17:22:59.709554 4769 scope.go:117] "RemoveContainer" containerID="0305291a9f97d723baf7ea6865f351f37d4c15692a2e5e718215bdfd3ee0b7e5" Jan 31 17:22:59 crc kubenswrapper[4769]: E0131 17:22:59.709541 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:22:59 crc kubenswrapper[4769]: I0131 17:22:59.709709 4769 scope.go:117] "RemoveContainer" containerID="e7a452392201a600b28e38e04241b9ad1abed5d326346ae4baeb2dcfd3e4514e" Jan 31 17:22:59 crc kubenswrapper[4769]: I0131 17:22:59.709725 4769 scope.go:117] "RemoveContainer" containerID="acd95b5565bae3340b8e173514ccf400cc7303c10cd705067a889467cb1bf9a2" Jan 31 17:22:59 crc kubenswrapper[4769]: I0131 17:22:59.709868 4769 scope.go:117] "RemoveContainer" containerID="e371ba0abd96fed529279e8c987705bc2490becc412646d62670c62aab9f16e2" Jan 31 17:22:59 crc kubenswrapper[4769]: E0131 17:22:59.710441 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:23:05 crc kubenswrapper[4769]: I0131 17:23:05.708622 4769 scope.go:117] "RemoveContainer" containerID="93b9536f213a9a8ebff163e44fa064fea71fe72fd3236438bcb9d2c1082f94fd" Jan 31 17:23:05 crc kubenswrapper[4769]: I0131 17:23:05.709016 4769 scope.go:117] "RemoveContainer" containerID="4a808d9f2da7be4edf651d6d424731ea4ceaeed364b4417a065b21c22246d30c" Jan 31 17:23:05 crc 
kubenswrapper[4769]: I0131 17:23:05.709041 4769 scope.go:117] "RemoveContainer" containerID="f2ab18d8403d1c91a041e56533da4b639b8b5c29c38ca01b64ebf011146357b0" Jan 31 17:23:05 crc kubenswrapper[4769]: I0131 17:23:05.709100 4769 scope.go:117] "RemoveContainer" containerID="1d248f00feafbc9afe8eb2636aa7b6e4ee44e68fbe01f11d313641cc945e74d3" Jan 31 17:23:05 crc kubenswrapper[4769]: I0131 17:23:05.709164 4769 scope.go:117] "RemoveContainer" containerID="c6269a4105bb6ea59db61469209797c1723266af84a047a8fd15c8ff6b20fe7c" Jan 31 17:23:05 crc kubenswrapper[4769]: I0131 17:23:05.709227 4769 scope.go:117] "RemoveContainer" containerID="bc16d6c35d321e7f226f326ede73d4f7a8c9f8e22dfc09695519261eeb85d0b3" Jan 31 17:23:05 crc kubenswrapper[4769]: I0131 17:23:05.709319 4769 scope.go:117] "RemoveContainer" containerID="d4d0768906439206ffe8cc39e0b86534e43578159e313ff70f760a5885b2534a" Jan 31 17:23:06 crc kubenswrapper[4769]: E0131 17:23:06.324099 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-updater pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:23:06 crc kubenswrapper[4769]: I0131 17:23:06.818532 4769 generic.go:334] "Generic (PLEG): container finished" podID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" containerID="fc7f2d2594ebce2c09ad64d8b46ef4f1e382cfe66edfb561c37dbd58453bfb27" exitCode=1 Jan 31 17:23:06 crc kubenswrapper[4769]: I0131 17:23:06.818565 4769 generic.go:334] "Generic (PLEG): container finished" podID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" containerID="68f04fe518d96347d9e65d354e865224a5d1059dfd9cde3741b8f8d9d844b1d3" exitCode=1 Jan 31 17:23:06 crc kubenswrapper[4769]: I0131 17:23:06.818601 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerStarted","Data":"a306086c08c3ab7cb4ff137f2c1802778475ce8efacf2ba8dc8c2a8b538b6b06"} Jan 31 17:23:06 crc kubenswrapper[4769]: I0131 17:23:06.818653 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerDied","Data":"fc7f2d2594ebce2c09ad64d8b46ef4f1e382cfe66edfb561c37dbd58453bfb27"} Jan 31 17:23:06 crc kubenswrapper[4769]: I0131 17:23:06.818673 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerDied","Data":"68f04fe518d96347d9e65d354e865224a5d1059dfd9cde3741b8f8d9d844b1d3"} Jan 31 17:23:06 crc kubenswrapper[4769]: I0131 17:23:06.818695 4769 scope.go:117] "RemoveContainer" containerID="4a808d9f2da7be4edf651d6d424731ea4ceaeed364b4417a065b21c22246d30c" Jan 31 17:23:06 crc kubenswrapper[4769]: I0131 17:23:06.819470 4769 scope.go:117] "RemoveContainer" containerID="68f04fe518d96347d9e65d354e865224a5d1059dfd9cde3741b8f8d9d844b1d3" Jan 31 17:23:06 crc kubenswrapper[4769]: I0131 17:23:06.819569 4769 scope.go:117] "RemoveContainer" containerID="fc7f2d2594ebce2c09ad64d8b46ef4f1e382cfe66edfb561c37dbd58453bfb27" Jan 31 17:23:06 crc kubenswrapper[4769]: I0131 17:23:06.819605 4769 scope.go:117] "RemoveContainer" containerID="f2ab18d8403d1c91a041e56533da4b639b8b5c29c38ca01b64ebf011146357b0" Jan 31 17:23:06 crc kubenswrapper[4769]: E0131 17:23:06.819952 4769 pod_workers.go:1301] "Error 
syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-updater pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:23:06 crc kubenswrapper[4769]: I0131 17:23:06.838962 4769 generic.go:334] "Generic (PLEG): container finished" podID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" containerID="cf3f1f137646349643b2d7de8f78265abc59fb6af31ec84637ea42eb3a8ef470" exitCode=1 Jan 31 17:23:06 crc kubenswrapper[4769]: I0131 17:23:06.839004 4769 generic.go:334] "Generic (PLEG): container finished" podID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" containerID="ea2101b1416d40c7010fb5552ae0bae007ad6fab3292999bdf6a34da86ec8e94" exitCode=1 Jan 31 17:23:06 crc kubenswrapper[4769]: I0131 17:23:06.839021 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerStarted","Data":"af97b9c0afac7e00ecda0289ca441a79363054feaff48779e7c32e0e6c78b8c2"} Jan 31 17:23:06 crc kubenswrapper[4769]: I0131 17:23:06.839087 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerDied","Data":"cf3f1f137646349643b2d7de8f78265abc59fb6af31ec84637ea42eb3a8ef470"} Jan 31 17:23:06 crc kubenswrapper[4769]: I0131 17:23:06.839102 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerDied","Data":"ea2101b1416d40c7010fb5552ae0bae007ad6fab3292999bdf6a34da86ec8e94"} Jan 31 17:23:06 crc kubenswrapper[4769]: I0131 17:23:06.839823 4769 scope.go:117] "RemoveContainer" containerID="ea2101b1416d40c7010fb5552ae0bae007ad6fab3292999bdf6a34da86ec8e94" Jan 31 17:23:06 crc kubenswrapper[4769]: I0131 17:23:06.839918 4769 scope.go:117] "RemoveContainer" containerID="cf3f1f137646349643b2d7de8f78265abc59fb6af31ec84637ea42eb3a8ef470" Jan 31 17:23:06 crc kubenswrapper[4769]: E0131 17:23:06.840374 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:23:06 crc kubenswrapper[4769]: I0131 17:23:06.873284 4769 scope.go:117] "RemoveContainer" containerID="93b9536f213a9a8ebff163e44fa064fea71fe72fd3236438bcb9d2c1082f94fd" Jan 31 17:23:06 crc kubenswrapper[4769]: I0131 17:23:06.914512 4769 scope.go:117] "RemoveContainer" 
containerID="bc16d6c35d321e7f226f326ede73d4f7a8c9f8e22dfc09695519261eeb85d0b3" Jan 31 17:23:06 crc kubenswrapper[4769]: I0131 17:23:06.955056 4769 scope.go:117] "RemoveContainer" containerID="c6269a4105bb6ea59db61469209797c1723266af84a047a8fd15c8ff6b20fe7c" Jan 31 17:23:07 crc kubenswrapper[4769]: I0131 17:23:07.861595 4769 generic.go:334] "Generic (PLEG): container finished" podID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" containerID="af97b9c0afac7e00ecda0289ca441a79363054feaff48779e7c32e0e6c78b8c2" exitCode=1 Jan 31 17:23:07 crc kubenswrapper[4769]: I0131 17:23:07.861782 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-1" event={"ID":"a2e45b15-42ba-44b4-91c5-fa5bc64d7a43","Type":"ContainerDied","Data":"af97b9c0afac7e00ecda0289ca441a79363054feaff48779e7c32e0e6c78b8c2"} Jan 31 17:23:07 crc kubenswrapper[4769]: I0131 17:23:07.862040 4769 scope.go:117] "RemoveContainer" containerID="d4d0768906439206ffe8cc39e0b86534e43578159e313ff70f760a5885b2534a" Jan 31 17:23:07 crc kubenswrapper[4769]: I0131 17:23:07.862594 4769 scope.go:117] "RemoveContainer" containerID="ea2101b1416d40c7010fb5552ae0bae007ad6fab3292999bdf6a34da86ec8e94" Jan 31 17:23:07 crc kubenswrapper[4769]: I0131 17:23:07.862647 4769 scope.go:117] "RemoveContainer" containerID="cf3f1f137646349643b2d7de8f78265abc59fb6af31ec84637ea42eb3a8ef470" Jan 31 17:23:07 crc kubenswrapper[4769]: I0131 17:23:07.862743 4769 scope.go:117] "RemoveContainer" containerID="af97b9c0afac7e00ecda0289ca441a79363054feaff48779e7c32e0e6c78b8c2" Jan 31 17:23:07 crc kubenswrapper[4769]: E0131 17:23:07.862996 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:23:07 crc kubenswrapper[4769]: I0131 17:23:07.878311 4769 generic.go:334] "Generic (PLEG): container finished" podID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" containerID="a306086c08c3ab7cb4ff137f2c1802778475ce8efacf2ba8dc8c2a8b538b6b06" exitCode=1 Jan 31 17:23:07 crc kubenswrapper[4769]: I0131 17:23:07.878376 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-2" event={"ID":"fd794dbe-f3dd-4a87-8b3f-612f46a05b2b","Type":"ContainerDied","Data":"a306086c08c3ab7cb4ff137f2c1802778475ce8efacf2ba8dc8c2a8b538b6b06"} Jan 31 17:23:07 crc kubenswrapper[4769]: I0131 17:23:07.878879 4769 scope.go:117] "RemoveContainer" containerID="68f04fe518d96347d9e65d354e865224a5d1059dfd9cde3741b8f8d9d844b1d3" Jan 31 17:23:07 crc kubenswrapper[4769]: I0131 17:23:07.878934 4769 scope.go:117] "RemoveContainer" containerID="fc7f2d2594ebce2c09ad64d8b46ef4f1e382cfe66edfb561c37dbd58453bfb27" Jan 31 17:23:07 crc kubenswrapper[4769]: I0131 17:23:07.878953 4769 scope.go:117] "RemoveContainer" containerID="f2ab18d8403d1c91a041e56533da4b639b8b5c29c38ca01b64ebf011146357b0" Jan 31 17:23:07 crc kubenswrapper[4769]: I0131 
17:23:07.879007 4769 scope.go:117] "RemoveContainer" containerID="a306086c08c3ab7cb4ff137f2c1802778475ce8efacf2ba8dc8c2a8b538b6b06" Jan 31 17:23:07 crc kubenswrapper[4769]: E0131 17:23:07.879263 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-updater pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:23:07 crc kubenswrapper[4769]: I0131 17:23:07.947278 4769 scope.go:117] "RemoveContainer" containerID="1d248f00feafbc9afe8eb2636aa7b6e4ee44e68fbe01f11d313641cc945e74d3" Jan 31 17:23:08 crc kubenswrapper[4769]: I0131 17:23:08.939282 4769 scope.go:117] "RemoveContainer" containerID="68f04fe518d96347d9e65d354e865224a5d1059dfd9cde3741b8f8d9d844b1d3" Jan 31 17:23:08 crc kubenswrapper[4769]: I0131 17:23:08.940320 4769 scope.go:117] "RemoveContainer" containerID="fc7f2d2594ebce2c09ad64d8b46ef4f1e382cfe66edfb561c37dbd58453bfb27" Jan 31 17:23:08 crc kubenswrapper[4769]: I0131 17:23:08.940402 4769 scope.go:117] "RemoveContainer" containerID="f2ab18d8403d1c91a041e56533da4b639b8b5c29c38ca01b64ebf011146357b0" Jan 31 17:23:08 crc kubenswrapper[4769]: I0131 17:23:08.940539 4769 scope.go:117] "RemoveContainer" containerID="a306086c08c3ab7cb4ff137f2c1802778475ce8efacf2ba8dc8c2a8b538b6b06" Jan 31 17:23:08 crc kubenswrapper[4769]: E0131 17:23:08.941138 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-updater pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:23:11 crc kubenswrapper[4769]: I0131 17:23:11.708279 4769 scope.go:117] "RemoveContainer" containerID="34043e4c4acd3bb4c77eb869798bcbcf46c6b144268550d7dd4bf295127b4033" Jan 31 17:23:11 crc kubenswrapper[4769]: I0131 17:23:11.708591 4769 scope.go:117] "RemoveContainer" 
containerID="4458fcd40801fe3787795388074fefeaa434873f4716c2298183466301cf956f" Jan 31 17:23:11 crc kubenswrapper[4769]: I0131 17:23:11.708726 4769 scope.go:117] "RemoveContainer" containerID="31c5410734988a34733bf2816d43f274001a7710f20cd41dd446e56fc21fd125" Jan 31 17:23:11 crc kubenswrapper[4769]: I0131 17:23:11.708755 4769 scope.go:117] "RemoveContainer" containerID="0305291a9f97d723baf7ea6865f351f37d4c15692a2e5e718215bdfd3ee0b7e5" Jan 31 17:23:11 crc kubenswrapper[4769]: I0131 17:23:11.708801 4769 scope.go:117] "RemoveContainer" containerID="e7a452392201a600b28e38e04241b9ad1abed5d326346ae4baeb2dcfd3e4514e" Jan 31 17:23:11 crc kubenswrapper[4769]: I0131 17:23:11.708807 4769 scope.go:117] "RemoveContainer" containerID="acd95b5565bae3340b8e173514ccf400cc7303c10cd705067a889467cb1bf9a2" Jan 31 17:23:11 crc kubenswrapper[4769]: I0131 17:23:11.708838 4769 scope.go:117] "RemoveContainer" containerID="e371ba0abd96fed529279e8c987705bc2490becc412646d62670c62aab9f16e2" Jan 31 17:23:11 crc kubenswrapper[4769]: E0131 17:23:11.708904 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:23:11 crc kubenswrapper[4769]: E0131 17:23:11.709137 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:23:13 crc kubenswrapper[4769]: I0131 17:23:13.707914 4769 scope.go:117] "RemoveContainer" containerID="d3d906994824dc2b8dba6f0135caee56ad13f546c2022a4bc88e51a1c15a9443" Jan 31 17:23:13 crc kubenswrapper[4769]: I0131 17:23:13.708470 4769 scope.go:117] "RemoveContainer" containerID="20329b69b54456b9125eb1ef9a1aeb204bed6414354d722188fd5d824c019f40" Jan 31 17:23:13 crc kubenswrapper[4769]: E0131 17:23:13.708929 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 
5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:23:19 crc kubenswrapper[4769]: I0131 17:23:19.709880 4769 scope.go:117] "RemoveContainer" containerID="68f04fe518d96347d9e65d354e865224a5d1059dfd9cde3741b8f8d9d844b1d3" Jan 31 17:23:19 crc kubenswrapper[4769]: I0131 17:23:19.710339 4769 scope.go:117] "RemoveContainer" containerID="fc7f2d2594ebce2c09ad64d8b46ef4f1e382cfe66edfb561c37dbd58453bfb27" Jan 31 17:23:19 crc kubenswrapper[4769]: I0131 17:23:19.710384 4769 scope.go:117] "RemoveContainer" containerID="f2ab18d8403d1c91a041e56533da4b639b8b5c29c38ca01b64ebf011146357b0" Jan 31 17:23:19 crc kubenswrapper[4769]: I0131 17:23:19.710539 4769 scope.go:117] "RemoveContainer" containerID="a306086c08c3ab7cb4ff137f2c1802778475ce8efacf2ba8dc8c2a8b538b6b06" Jan 31 17:23:19 crc kubenswrapper[4769]: E0131 17:23:19.711078 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-updater pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:23:22 crc kubenswrapper[4769]: I0131 17:23:22.719233 4769 scope.go:117] "RemoveContainer" containerID="ea2101b1416d40c7010fb5552ae0bae007ad6fab3292999bdf6a34da86ec8e94" Jan 31 17:23:22 crc kubenswrapper[4769]: I0131 17:23:22.719395 4769 scope.go:117] "RemoveContainer" containerID="cf3f1f137646349643b2d7de8f78265abc59fb6af31ec84637ea42eb3a8ef470" Jan 31 17:23:22 crc kubenswrapper[4769]: I0131 17:23:22.719708 4769 scope.go:117] "RemoveContainer" containerID="af97b9c0afac7e00ecda0289ca441a79363054feaff48779e7c32e0e6c78b8c2" Jan 31 17:23:22 crc kubenswrapper[4769]: I0131 17:23:22.719881 4769 scope.go:117] "RemoveContainer" containerID="34043e4c4acd3bb4c77eb869798bcbcf46c6b144268550d7dd4bf295127b4033" Jan 31 17:23:22 crc kubenswrapper[4769]: I0131 17:23:22.720011 4769 scope.go:117] "RemoveContainer" containerID="31c5410734988a34733bf2816d43f274001a7710f20cd41dd446e56fc21fd125" Jan 31 17:23:22 crc kubenswrapper[4769]: I0131 17:23:22.720060 4769 scope.go:117] "RemoveContainer" containerID="0305291a9f97d723baf7ea6865f351f37d4c15692a2e5e718215bdfd3ee0b7e5" Jan 31 17:23:22 crc kubenswrapper[4769]: I0131 17:23:22.720160 4769 scope.go:117] "RemoveContainer" containerID="e7a452392201a600b28e38e04241b9ad1abed5d326346ae4baeb2dcfd3e4514e" Jan 31 17:23:22 crc 
kubenswrapper[4769]: I0131 17:23:22.720177 4769 scope.go:117] "RemoveContainer" containerID="acd95b5565bae3340b8e173514ccf400cc7303c10cd705067a889467cb1bf9a2" Jan 31 17:23:22 crc kubenswrapper[4769]: I0131 17:23:22.720248 4769 scope.go:117] "RemoveContainer" containerID="e371ba0abd96fed529279e8c987705bc2490becc412646d62670c62aab9f16e2" Jan 31 17:23:22 crc kubenswrapper[4769]: E0131 17:23:22.720462 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:23:22 crc kubenswrapper[4769]: E0131 17:23:22.720907 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:23:25 crc kubenswrapper[4769]: I0131 17:23:25.708782 4769 scope.go:117] "RemoveContainer" containerID="4458fcd40801fe3787795388074fefeaa434873f4716c2298183466301cf956f" Jan 31 17:23:25 crc kubenswrapper[4769]: E0131 17:23:25.709319 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:23:27 crc kubenswrapper[4769]: I0131 17:23:27.708354 4769 scope.go:117] "RemoveContainer" 
containerID="d3d906994824dc2b8dba6f0135caee56ad13f546c2022a4bc88e51a1c15a9443" Jan 31 17:23:27 crc kubenswrapper[4769]: I0131 17:23:27.708729 4769 scope.go:117] "RemoveContainer" containerID="20329b69b54456b9125eb1ef9a1aeb204bed6414354d722188fd5d824c019f40" Jan 31 17:23:27 crc kubenswrapper[4769]: E0131 17:23:27.847914 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:23:28 crc kubenswrapper[4769]: I0131 17:23:28.100771 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerStarted","Data":"4a92499c0d66f85b98ed19650a001472e471c9aa05fa36b1d0dd29d4d1753c65"} Jan 31 17:23:28 crc kubenswrapper[4769]: I0131 17:23:28.100981 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 17:23:28 crc kubenswrapper[4769]: I0131 17:23:28.101307 4769 scope.go:117] "RemoveContainer" containerID="d3d906994824dc2b8dba6f0135caee56ad13f546c2022a4bc88e51a1c15a9443" Jan 31 17:23:28 crc kubenswrapper[4769]: E0131 17:23:28.101606 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:23:29 crc kubenswrapper[4769]: I0131 17:23:29.116810 4769 generic.go:334] "Generic (PLEG): container finished" podID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" containerID="4a92499c0d66f85b98ed19650a001472e471c9aa05fa36b1d0dd29d4d1753c65" exitCode=1 Jan 31 17:23:29 crc kubenswrapper[4769]: I0131 17:23:29.116891 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" event={"ID":"fb764692-fbb8-4fb4-860c-2cd0e0cfd452","Type":"ContainerDied","Data":"4a92499c0d66f85b98ed19650a001472e471c9aa05fa36b1d0dd29d4d1753c65"} Jan 31 17:23:29 crc kubenswrapper[4769]: I0131 17:23:29.117314 4769 scope.go:117] "RemoveContainer" containerID="20329b69b54456b9125eb1ef9a1aeb204bed6414354d722188fd5d824c019f40" Jan 31 17:23:29 crc kubenswrapper[4769]: I0131 17:23:29.117790 4769 scope.go:117] "RemoveContainer" containerID="d3d906994824dc2b8dba6f0135caee56ad13f546c2022a4bc88e51a1c15a9443" Jan 31 17:23:29 crc kubenswrapper[4769]: I0131 17:23:29.117834 4769 scope.go:117] "RemoveContainer" containerID="4a92499c0d66f85b98ed19650a001472e471c9aa05fa36b1d0dd29d4d1753c65" Jan 31 17:23:29 crc kubenswrapper[4769]: E0131 17:23:29.118314 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" 
pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:23:29 crc kubenswrapper[4769]: I0131 17:23:29.645229 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" Jan 31 17:23:30 crc kubenswrapper[4769]: I0131 17:23:30.130008 4769 scope.go:117] "RemoveContainer" containerID="d3d906994824dc2b8dba6f0135caee56ad13f546c2022a4bc88e51a1c15a9443" Jan 31 17:23:30 crc kubenswrapper[4769]: I0131 17:23:30.130059 4769 scope.go:117] "RemoveContainer" containerID="4a92499c0d66f85b98ed19650a001472e471c9aa05fa36b1d0dd29d4d1753c65" Jan 31 17:23:30 crc kubenswrapper[4769]: E0131 17:23:30.130447 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:23:30 crc kubenswrapper[4769]: I0131 17:23:30.710304 4769 scope.go:117] "RemoveContainer" containerID="68f04fe518d96347d9e65d354e865224a5d1059dfd9cde3741b8f8d9d844b1d3" Jan 31 17:23:30 crc kubenswrapper[4769]: I0131 17:23:30.710546 4769 scope.go:117] "RemoveContainer" containerID="fc7f2d2594ebce2c09ad64d8b46ef4f1e382cfe66edfb561c37dbd58453bfb27" Jan 31 17:23:30 crc kubenswrapper[4769]: I0131 17:23:30.710590 4769 scope.go:117] "RemoveContainer" containerID="f2ab18d8403d1c91a041e56533da4b639b8b5c29c38ca01b64ebf011146357b0" Jan 31 17:23:30 crc kubenswrapper[4769]: I0131 17:23:30.710716 4769 scope.go:117] "RemoveContainer" containerID="a306086c08c3ab7cb4ff137f2c1802778475ce8efacf2ba8dc8c2a8b538b6b06" Jan 31 17:23:30 crc kubenswrapper[4769]: E0131 17:23:30.711252 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=container-updater pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-2_swift-kuttl-tests(fd794dbe-f3dd-4a87-8b3f-612f46a05b2b)\"]" pod="swift-kuttl-tests/swift-storage-2" podUID="fd794dbe-f3dd-4a87-8b3f-612f46a05b2b" Jan 31 17:23:31 crc kubenswrapper[4769]: I0131 17:23:31.143399 4769 scope.go:117] "RemoveContainer" containerID="d3d906994824dc2b8dba6f0135caee56ad13f546c2022a4bc88e51a1c15a9443" Jan 31 17:23:31 crc kubenswrapper[4769]: I0131 17:23:31.143868 4769 scope.go:117] "RemoveContainer" containerID="4a92499c0d66f85b98ed19650a001472e471c9aa05fa36b1d0dd29d4d1753c65" Jan 31 17:23:31 crc kubenswrapper[4769]: E0131 17:23:31.144240 4769 
pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"proxy-httpd\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\", failed to \"StartContainer\" for \"proxy-server\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-gcpxn_swift-kuttl-tests(fb764692-fbb8-4fb4-860c-2cd0e0cfd452)\"]" pod="swift-kuttl-tests/swift-proxy-7d8cf99555-gcpxn" podUID="fb764692-fbb8-4fb4-860c-2cd0e0cfd452" Jan 31 17:23:33 crc kubenswrapper[4769]: I0131 17:23:33.709458 4769 scope.go:117] "RemoveContainer" containerID="34043e4c4acd3bb4c77eb869798bcbcf46c6b144268550d7dd4bf295127b4033" Jan 31 17:23:33 crc kubenswrapper[4769]: I0131 17:23:33.709597 4769 scope.go:117] "RemoveContainer" containerID="31c5410734988a34733bf2816d43f274001a7710f20cd41dd446e56fc21fd125" Jan 31 17:23:33 crc kubenswrapper[4769]: I0131 17:23:33.709635 4769 scope.go:117] "RemoveContainer" containerID="0305291a9f97d723baf7ea6865f351f37d4c15692a2e5e718215bdfd3ee0b7e5" Jan 31 17:23:33 crc kubenswrapper[4769]: I0131 17:23:33.709712 4769 scope.go:117] "RemoveContainer" containerID="e7a452392201a600b28e38e04241b9ad1abed5d326346ae4baeb2dcfd3e4514e" Jan 31 17:23:33 crc kubenswrapper[4769]: I0131 17:23:33.709723 4769 scope.go:117] "RemoveContainer" containerID="acd95b5565bae3340b8e173514ccf400cc7303c10cd705067a889467cb1bf9a2" Jan 31 17:23:33 crc kubenswrapper[4769]: I0131 17:23:33.709778 4769 scope.go:117] "RemoveContainer" containerID="e371ba0abd96fed529279e8c987705bc2490becc412646d62670c62aab9f16e2" Jan 31 17:23:34 crc kubenswrapper[4769]: I0131 17:23:34.175925 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerStarted","Data":"238ae868926d2b60f60ad7d4f1077b44d0d3aa96883d22f5c847b94d66ba99e2"} Jan 31 17:23:34 crc kubenswrapper[4769]: E0131 17:23:34.543998 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:23:35 crc kubenswrapper[4769]: I0131 17:23:35.211400 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="238ae868926d2b60f60ad7d4f1077b44d0d3aa96883d22f5c847b94d66ba99e2" exitCode=1 Jan 31 17:23:35 crc kubenswrapper[4769]: I0131 17:23:35.211751 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="84857df8289b62d02e1c8ddcc0f990a7ebf98ca3062a027caf73a7debeb17ffc" exitCode=1 Jan 31 17:23:35 crc kubenswrapper[4769]: I0131 17:23:35.211765 4769 generic.go:334] "Generic (PLEG): container finished" podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="151220f3c0faf81f2e321af261fd36dd54464c6c92061329c000954b22c368de" exitCode=1 Jan 31 17:23:35 crc kubenswrapper[4769]: I0131 17:23:35.211773 4769 generic.go:334] "Generic (PLEG): container finished" 
podID="13aa61f9-8314-4571-afce-8c24594fa917" containerID="844b103bec1e06539cf22b81c3253ee7e7890ee7b0cef991a18a623132d515b9" exitCode=1 Jan 31 17:23:35 crc kubenswrapper[4769]: I0131 17:23:35.211475 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"238ae868926d2b60f60ad7d4f1077b44d0d3aa96883d22f5c847b94d66ba99e2"} Jan 31 17:23:35 crc kubenswrapper[4769]: I0131 17:23:35.211821 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"84857df8289b62d02e1c8ddcc0f990a7ebf98ca3062a027caf73a7debeb17ffc"} Jan 31 17:23:35 crc kubenswrapper[4769]: I0131 17:23:35.211843 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"151220f3c0faf81f2e321af261fd36dd54464c6c92061329c000954b22c368de"} Jan 31 17:23:35 crc kubenswrapper[4769]: I0131 17:23:35.211858 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="swift-kuttl-tests/swift-storage-0" event={"ID":"13aa61f9-8314-4571-afce-8c24594fa917","Type":"ContainerDied","Data":"844b103bec1e06539cf22b81c3253ee7e7890ee7b0cef991a18a623132d515b9"} Jan 31 17:23:35 crc kubenswrapper[4769]: I0131 17:23:35.211886 4769 scope.go:117] "RemoveContainer" containerID="34043e4c4acd3bb4c77eb869798bcbcf46c6b144268550d7dd4bf295127b4033" Jan 31 17:23:35 crc kubenswrapper[4769]: I0131 17:23:35.212474 4769 scope.go:117] "RemoveContainer" containerID="238ae868926d2b60f60ad7d4f1077b44d0d3aa96883d22f5c847b94d66ba99e2" Jan 31 17:23:35 crc kubenswrapper[4769]: I0131 17:23:35.212636 4769 scope.go:117] "RemoveContainer" containerID="844b103bec1e06539cf22b81c3253ee7e7890ee7b0cef991a18a623132d515b9" Jan 31 17:23:35 crc kubenswrapper[4769]: I0131 17:23:35.212698 4769 scope.go:117] "RemoveContainer" containerID="0305291a9f97d723baf7ea6865f351f37d4c15692a2e5e718215bdfd3ee0b7e5" Jan 31 17:23:35 crc kubenswrapper[4769]: I0131 17:23:35.212794 4769 scope.go:117] "RemoveContainer" containerID="e7a452392201a600b28e38e04241b9ad1abed5d326346ae4baeb2dcfd3e4514e" Jan 31 17:23:35 crc kubenswrapper[4769]: I0131 17:23:35.212808 4769 scope.go:117] "RemoveContainer" containerID="151220f3c0faf81f2e321af261fd36dd54464c6c92061329c000954b22c368de" Jan 31 17:23:35 crc kubenswrapper[4769]: I0131 17:23:35.212919 4769 scope.go:117] "RemoveContainer" containerID="84857df8289b62d02e1c8ddcc0f990a7ebf98ca3062a027caf73a7debeb17ffc" Jan 31 17:23:35 crc kubenswrapper[4769]: E0131 17:23:35.213646 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-updater 
pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:23:35 crc kubenswrapper[4769]: I0131 17:23:35.284798 4769 scope.go:117] "RemoveContainer" containerID="e371ba0abd96fed529279e8c987705bc2490becc412646d62670c62aab9f16e2" Jan 31 17:23:35 crc kubenswrapper[4769]: I0131 17:23:35.337803 4769 scope.go:117] "RemoveContainer" containerID="acd95b5565bae3340b8e173514ccf400cc7303c10cd705067a889467cb1bf9a2" Jan 31 17:23:35 crc kubenswrapper[4769]: I0131 17:23:35.391078 4769 scope.go:117] "RemoveContainer" containerID="31c5410734988a34733bf2816d43f274001a7710f20cd41dd446e56fc21fd125" Jan 31 17:23:36 crc kubenswrapper[4769]: I0131 17:23:36.230865 4769 scope.go:117] "RemoveContainer" containerID="238ae868926d2b60f60ad7d4f1077b44d0d3aa96883d22f5c847b94d66ba99e2" Jan 31 17:23:36 crc kubenswrapper[4769]: I0131 17:23:36.230935 4769 scope.go:117] "RemoveContainer" containerID="844b103bec1e06539cf22b81c3253ee7e7890ee7b0cef991a18a623132d515b9" Jan 31 17:23:36 crc kubenswrapper[4769]: I0131 17:23:36.230959 4769 scope.go:117] "RemoveContainer" containerID="0305291a9f97d723baf7ea6865f351f37d4c15692a2e5e718215bdfd3ee0b7e5" Jan 31 17:23:36 crc kubenswrapper[4769]: I0131 17:23:36.231006 4769 scope.go:117] "RemoveContainer" containerID="e7a452392201a600b28e38e04241b9ad1abed5d326346ae4baeb2dcfd3e4514e" Jan 31 17:23:36 crc kubenswrapper[4769]: I0131 17:23:36.231014 4769 scope.go:117] "RemoveContainer" containerID="151220f3c0faf81f2e321af261fd36dd54464c6c92061329c000954b22c368de" Jan 31 17:23:36 crc kubenswrapper[4769]: I0131 17:23:36.231046 4769 scope.go:117] "RemoveContainer" containerID="84857df8289b62d02e1c8ddcc0f990a7ebf98ca3062a027caf73a7debeb17ffc" Jan 31 17:23:36 crc kubenswrapper[4769]: E0131 17:23:36.231485 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-updater\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-updater pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\", failed to \"StartContainer\" for \"container-sharder\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=container-sharder pod=swift-storage-0_swift-kuttl-tests(13aa61f9-8314-4571-afce-8c24594fa917)\"]" pod="swift-kuttl-tests/swift-storage-0" podUID="13aa61f9-8314-4571-afce-8c24594fa917" Jan 31 17:23:36 crc kubenswrapper[4769]: I0131 17:23:36.709969 4769 scope.go:117] "RemoveContainer" containerID="ea2101b1416d40c7010fb5552ae0bae007ad6fab3292999bdf6a34da86ec8e94" Jan 31 17:23:36 crc kubenswrapper[4769]: I0131 17:23:36.710327 4769 scope.go:117] "RemoveContainer" containerID="cf3f1f137646349643b2d7de8f78265abc59fb6af31ec84637ea42eb3a8ef470" Jan 31 17:23:36 crc kubenswrapper[4769]: I0131 17:23:36.710421 4769 scope.go:117] "RemoveContainer" containerID="af97b9c0afac7e00ecda0289ca441a79363054feaff48779e7c32e0e6c78b8c2" Jan 31 17:23:36 crc kubenswrapper[4769]: E0131 17:23:36.710701 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=account-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"container-replicator\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=container-replicator pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\", failed to \"StartContainer\" for \"object-expirer\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=object-expirer pod=swift-storage-1_swift-kuttl-tests(a2e45b15-42ba-44b4-91c5-fa5bc64d7a43)\"]" pod="swift-kuttl-tests/swift-storage-1" podUID="a2e45b15-42ba-44b4-91c5-fa5bc64d7a43" Jan 31 17:23:38 crc kubenswrapper[4769]: I0131 17:23:38.708613 4769 scope.go:117] "RemoveContainer" containerID="4458fcd40801fe3787795388074fefeaa434873f4716c2298183466301cf956f" Jan 31 17:23:38 crc kubenswrapper[4769]: E0131 17:23:38.709083 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-4bqbm_openshift-machine-config-operator(1d352f75-43f7-4b8c-867e-cfb17bbbe011)\"" pod="openshift-machine-config-operator/machine-config-daemon-4bqbm" podUID="1d352f75-43f7-4b8c-867e-cfb17bbbe011" Jan 31 17:23:38 crc kubenswrapper[4769]: I0131 17:23:38.902027 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices\") pod \"swift-ring-rebalance-2sjs2\" (UID: \"54c0116b-a027-4f11-8b6b-aa00778f1acb\") " pod="swift-kuttl-tests/swift-ring-rebalance-2sjs2" Jan 31 17:23:38 crc kubenswrapper[4769]: E0131 17:23:38.902188 4769 configmap.go:193] Couldn't get configMap swift-kuttl-tests/swift-ring-config-data: configmap "swift-ring-config-data" not found Jan 31 17:23:38 crc kubenswrapper[4769]: E0131 17:23:38.902264 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices podName:54c0116b-a027-4f11-8b6b-aa00778f1acb nodeName:}" failed. No retries permitted until 2026-01-31 17:25:40.902248615 +0000 UTC m=+3388.976417284 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "ring-data-devices" (UniqueName: "kubernetes.io/configmap/54c0116b-a027-4f11-8b6b-aa00778f1acb-ring-data-devices") pod "swift-ring-rebalance-2sjs2" (UID: "54c0116b-a027-4f11-8b6b-aa00778f1acb") : configmap "swift-ring-config-data" not found